Patrick McHardy | 7750f40 | 2008-07-08 03:23:36 -0700 | [diff] [blame] | 1 | #include <linux/skbuff.h> |
| 2 | #include <linux/netdevice.h> |
| 3 | #include <linux/if_vlan.h> |
Herbert Xu | 4ead443 | 2009-03-01 00:11:52 -0800 | [diff] [blame] | 4 | #include <linux/netpoll.h> |
Patrick McHardy | 7750f40 | 2008-07-08 03:23:36 -0700 | [diff] [blame] | 5 | #include "vlan.h" |
| 6 | |
/* Demultiplex an incoming frame onto its VLAN device.
 *
 * Called from the core receive path with *skbp holding a frame whose
 * vlan_tci has been populated (by hw acceleration or vlan_untag()).
 * On success the skb is retargeted at the matching VLAN net_device and
 * true is returned; the caller then re-runs protocol dispatch.
 * Returns false when no VLAN device matches the tag (the frame stays on
 * the real device) or when skb_share_check() fails, in which case *skbp
 * is NULL and the skb has already been freed.
 */
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;	/* drop PCP/CFI bits */
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev) {
		/* No VLAN device configured for this tag.  For a real
		 * (non-zero) VID, mark the frame OTHERHOST so upper layers
		 * drop it instead of consuming another VLAN's traffic.
		 */
		if (vlan_id)
			skb->pkt_type = PACKET_OTHERHOST;
		return false;
	}

	/* We are about to rewrite skb metadata; unshare first.  On failure
	 * the skb has been freed and *skbp is set to NULL for the caller.
	 */
	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	/* Map the 802.1p priority bits of the tag onto skb->priority. */
	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;	/* tag fully consumed */

	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);

	/* u64_stats bracket lets 32-bit readers detect and retry if they
	 * race with this per-cpu update.
	 */
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;

	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		break;
	case PACKET_MULTICAST:
		rx_stats->rx_multicast++;
		break;
	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
		break;
	}
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}
Patrick McHardy | 22d1ba7 | 2008-07-08 03:23:57 -0700 | [diff] [blame] | 54 | |
| 55 | struct net_device *vlan_dev_real_dev(const struct net_device *dev) |
| 56 | { |
| 57 | return vlan_dev_info(dev)->real_dev; |
| 58 | } |
Ben Greear | 116cb42 | 2009-01-26 12:37:53 -0800 | [diff] [blame] | 59 | EXPORT_SYMBOL(vlan_dev_real_dev); |
Patrick McHardy | 22d1ba7 | 2008-07-08 03:23:57 -0700 | [diff] [blame] | 60 | |
| 61 | u16 vlan_dev_vlan_id(const struct net_device *dev) |
| 62 | { |
| 63 | return vlan_dev_info(dev)->vlan_id; |
| 64 | } |
Ben Greear | 116cb42 | 2009-01-26 12:37:53 -0800 | [diff] [blame] | 65 | EXPORT_SYMBOL(vlan_dev_vlan_id); |
Herbert Xu | e1c096e | 2009-01-06 10:50:09 -0800 | [diff] [blame] | 66 | |
Jesse Gross | 3701e51 | 2010-10-20 13:56:06 +0000 | [diff] [blame] | 67 | /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ |
| 68 | int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, |
| 69 | u16 vlan_tci, int polling) |
Herbert Xu | e1c096e | 2009-01-06 10:50:09 -0800 | [diff] [blame] | 70 | { |
Eric Dumazet | b93ab83 | 2009-11-13 06:33:11 +0000 | [diff] [blame] | 71 | __vlan_hwaccel_put_tag(skb, vlan_tci); |
Jesse Gross | 3701e51 | 2010-10-20 13:56:06 +0000 | [diff] [blame] | 72 | return polling ? netif_receive_skb(skb) : netif_rx(skb); |
Herbert Xu | e1c096e | 2009-01-06 10:50:09 -0800 | [diff] [blame] | 73 | } |
Jesse Gross | 3701e51 | 2010-10-20 13:56:06 +0000 | [diff] [blame] | 74 | EXPORT_SYMBOL(__vlan_hwaccel_rx); |
Herbert Xu | e1c096e | 2009-01-06 10:50:09 -0800 | [diff] [blame] | 75 | |
Ben Hutchings | c7c4b3b | 2009-10-29 21:36:53 -0700 | [diff] [blame] | 76 | gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, |
| 77 | unsigned int vlan_tci, struct sk_buff *skb) |
Herbert Xu | e1c096e | 2009-01-06 10:50:09 -0800 | [diff] [blame] | 78 | { |
Jesse Gross | 3701e51 | 2010-10-20 13:56:06 +0000 | [diff] [blame] | 79 | __vlan_hwaccel_put_tag(skb, vlan_tci); |
| 80 | return napi_gro_receive(napi, skb); |
Herbert Xu | e1c096e | 2009-01-06 10:50:09 -0800 | [diff] [blame] | 81 | } |
| 82 | EXPORT_SYMBOL(vlan_gro_receive); |
| 83 | |
Ben Hutchings | c7c4b3b | 2009-10-29 21:36:53 -0700 | [diff] [blame] | 84 | gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, |
| 85 | unsigned int vlan_tci) |
Herbert Xu | e1c096e | 2009-01-06 10:50:09 -0800 | [diff] [blame] | 86 | { |
Jesse Gross | 3701e51 | 2010-10-20 13:56:06 +0000 | [diff] [blame] | 87 | __vlan_hwaccel_put_tag(napi->skb, vlan_tci); |
| 88 | return napi_gro_frags(napi); |
Herbert Xu | e1c096e | 2009-01-06 10:50:09 -0800 | [diff] [blame] | 89 | } |
| 90 | EXPORT_SYMBOL(vlan_gro_frags); |
Jiri Pirko | bcc6d47 | 2011-04-07 19:48:33 +0000 | [diff] [blame^] | 91 | |
| 92 | static struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb) |
| 93 | { |
| 94 | if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) { |
| 95 | if (skb_cow(skb, skb_headroom(skb)) < 0) |
| 96 | skb = NULL; |
| 97 | if (skb) { |
| 98 | /* Lifted from Gleb's VLAN code... */ |
| 99 | memmove(skb->data - ETH_HLEN, |
| 100 | skb->data - VLAN_ETH_HLEN, 12); |
| 101 | skb->mac_header += VLAN_HLEN; |
| 102 | } |
| 103 | } |
| 104 | return skb; |
| 105 | } |
| 106 | |
| 107 | static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr) |
| 108 | { |
| 109 | __be16 proto; |
| 110 | unsigned char *rawp; |
| 111 | |
| 112 | /* |
| 113 | * Was a VLAN packet, grab the encapsulated protocol, which the layer |
| 114 | * three protocols care about. |
| 115 | */ |
| 116 | |
| 117 | proto = vhdr->h_vlan_encapsulated_proto; |
| 118 | if (ntohs(proto) >= 1536) { |
| 119 | skb->protocol = proto; |
| 120 | return; |
| 121 | } |
| 122 | |
| 123 | rawp = skb->data; |
| 124 | if (*(unsigned short *) rawp == 0xFFFF) |
| 125 | /* |
| 126 | * This is a magic hack to spot IPX packets. Older Novell |
| 127 | * breaks the protocol design and runs IPX over 802.3 without |
| 128 | * an 802.2 LLC layer. We look for FFFF which isn't a used |
| 129 | * 802.2 SSAP/DSAP. This won't work for fault tolerant netware |
| 130 | * but does for the rest. |
| 131 | */ |
| 132 | skb->protocol = htons(ETH_P_802_3); |
| 133 | else |
| 134 | /* |
| 135 | * Real 802.2 LLC |
| 136 | */ |
| 137 | skb->protocol = htons(ETH_P_802_2); |
| 138 | } |
| 139 | |
/* Software-untag an 802.1Q frame: pull the VLAN header out of the packet
 * data and record the tag in skb->vlan_tci, mirroring what VLAN hardware
 * acceleration does.  Returns the (possibly reallocated) skb on success,
 * or NULL if the skb was shared-and-unclonable or too short, in which
 * case it has been freed.
 */
struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	/* We will modify the packet data; work on an unshared skb.  On
	 * failure skb is NULL and err_free's kfree_skb(NULL) is a no-op.
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	/* Need at least the 4-byte VLAN header in the linear area before
	 * dereferencing it below.
	 */
	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	/* Strip the VLAN header, keeping any hw checksum consistent, then
	 * fix up skb->protocol from the encapsulated EtherType.
	 */
	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	/* May rewrite the MAC header when REORDER_HDR is enabled. */
	skb = vlan_check_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}