/* -*- linux-c -*-
 * INET		802.1Q VLAN
 *		Ethernet-type device handling.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 *		Please send support related email to: vlan@scry.wanfear.com
 *		VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * Fixes:	Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com>
 *		  - reset skb->pkt_type on incoming packets when MAC was changed
 *		  - see that changed MAC is saddr for outgoing packets
 *		Oct 20, 2001: Ard van Breeman:
 *		  - Fix MC-list, finally.
 *		  - Flush MC-list on VLAN destroy.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/in.h>
#include <linux/init.h>
#include <asm/uaccess.h> /* for copy_from_user */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/datalink.h>
#include <net/p8022.h>
#include <net/arp.h>

#include "vlan.h"
#include "vlanproc.h"
#include <linux/if_vlan.h>
#include <net/ip.h>

/*
 *	Rebuild the Ethernet MAC header. This is called after an ARP
 *	(or in future other address resolution) has completed on this
 *	sk_buff. We now let ARP fill in the other fields.
 *
 *	This routine CANNOT use cached dst->neigh!
 *	Really, it is used only when dst->neigh is wrong.
 *
 * TODO: This needs a checkup, I'm ignorant here. --BLG
 */
int vlan_dev_rebuild_header(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);

	switch (veth->h_vlan_encapsulated_proto) {
#ifdef CONFIG_INET
	case __constant_htons(ETH_P_IP):

		/* TODO: Confirm this will work with VLAN headers... */
		return arp_find(veth->h_dest, skb);
#endif
	default:
		printk(VLAN_DBG
		       "%s: unable to resolve type %X addresses.\n",
		       dev->name, (int)veth->h_vlan_encapsulated_proto);

		memcpy(veth->h_source, dev->dev_addr, ETH_ALEN);
		break;
	};

	return 0;
}

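/* When the REORDER_HEADER flag (bit 0 of the VLAN device flags) is set,
 * strip the 4-byte VLAN tag from a received frame by sliding the Ethernet
 * destination and source addresses forward over it, so upper layers see a
 * plain Ethernet frame.  The skb is copied first if it is shared or cloned,
 * so NULL may be returned on allocation failure.
 */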
static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
{
	if (VLAN_DEV_INFO(skb->dev)->flags & 1) {
		if (skb_shared(skb) || skb_cloned(skb)) {
			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
			kfree_skb(skb);
			skb = nskb;
		}
		if (skb) {
			/* Lifted from Gleb's VLAN code... */
			memmove(skb->data - ETH_HLEN,
				skb->data - VLAN_ETH_HLEN, 12);
			skb->mac.raw += VLAN_HLEN;
		}
	}

	return skb;
}

/*
 *	Determine the packet's protocol ID. The rule here is that we
 *	assume 802.3 if the type field is short enough to be a length.
 *	This is normal practice and works for any 'now in use' protocol.
 *
 *  Also, at this point we assume that we ARE dealing exclusively with
 *  VLAN packets, or packets that should be made into VLAN packets based
 *  on a default VLAN ID.
 *
 *  NOTE: Should be similar to ethernet/eth.c.
 *
 *  SANITY NOTE:  This method is called when a packet is moving up the stack
 *                towards userland.  To get here, it would have already passed
 *                through the ethernet/eth.c eth_type_trans() method.
 *  SANITY NOTE 2: We are referencing the VLAN_HDR fields, which MAY be
 *                 stored UNALIGNED in memory.  RISC systems don't like
 *                 such cases very much...
 *  SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be aligned,
 *                  so there doesn't need to be any of the unaligned stuff.  It
 *                  has been commented out now...  --Ben
 *
 */
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *ptype, struct net_device *orig_dev)
{
	unsigned char *rawp = NULL;
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data);
	unsigned short vid;
	struct net_device_stats *stats;
	unsigned short vlan_TCI;
	__be16 proto;

	/* vlan_TCI = ntohs(get_unaligned(&vhdr->h_vlan_TCI)); */
	vlan_TCI = ntohs(vhdr->h_vlan_TCI);

	vid = (vlan_TCI & VLAN_VID_MASK);

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: skb: %p vlan_id: %hx\n",
	       __FUNCTION__, skb, vid);
#endif

	/* Ok, we will find the correct VLAN device, strip the header,
	 * and then go on as usual.
	 */

	/* We have 12 bits of vlan ID.
	 *
	 * We must not allow preemption to be re-enabled (i.e. we must stay
	 * inside the RCU read-side critical section) until we either hold a
	 * reference to the device (netif_rx does that) or we fail.
	 */

	rcu_read_lock();
	skb->dev = __find_vlan_dev(dev, vid);
	if (!skb->dev) {
		rcu_read_unlock();

#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: ERROR: No net_device for VID: %i on dev: %s [%i]\n",
		       __FUNCTION__, (unsigned int)(vid), dev->name, dev->ifindex);
#endif
		kfree_skb(skb);
		return -1;
	}

	skb->dev->last_rx = jiffies;

	/* Bump the rx counters for the VLAN device. */
	stats = vlan_dev_get_stats(skb->dev);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	/* Take off the VLAN header (4 bytes currently) */
	skb_pull_rcsum(skb, VLAN_HLEN);

	/* Ok, let's check to make sure the device (dev) we
	 * came in on is what this VLAN is attached to.
	 */

	if (dev != VLAN_DEV_INFO(skb->dev)->real_dev) {
		rcu_read_unlock();

#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: dropping skb: %p because came in on wrong device, dev: %s real_dev: %s, skb_dev: %s\n",
		       __FUNCTION__, skb, dev->name,
		       VLAN_DEV_INFO(skb->dev)->real_dev->name,
		       skb->dev->name);
#endif
		kfree_skb(skb);
		stats->rx_errors++;
		return -1;
	}

	/*
	 * Deal with ingress priority mapping.
	 */
	skb->priority = vlan_get_ingress_priority(skb->dev, ntohs(vhdr->h_vlan_TCI));

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: priority: %lu for TCI: %hu (hbo)\n",
	       __FUNCTION__, (unsigned long)(skb->priority),
	       ntohs(vhdr->h_vlan_TCI));
#endif

	/* The ethernet driver already did the pkt_type calculations
	 * for us...
	 */
	switch (skb->pkt_type) {
	case PACKET_BROADCAST: /* Yeah, stats collect these together.. */
		// stats->broadcast ++; // no such counter :-(
		break;

	case PACKET_MULTICAST:
		stats->multicast++;
		break;

	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the underlying
		 * device, and still route correctly.
		 */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest, skb->dev->dev_addr)) {
			/* It is for our (changed) MAC-address! */
			skb->pkt_type = PACKET_HOST;
		}
		break;
	default:
		break;
	};

	/* Was a VLAN packet, grab the encapsulated protocol, which the layer
	 * three protocols care about.
	 */
	/* proto = get_unaligned(&vhdr->h_vlan_encapsulated_proto); */
	proto = vhdr->h_vlan_encapsulated_proto;

	skb->protocol = proto;
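	/* Ethertype values of 1536 (0x0600) and above identify a protocol;
	 * smaller values are 802.3 length fields and need the LLC/IPX
	 * special-casing below.
	 */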
	if (ntohs(proto) >= 1536) {
		/* place it back on the queue to be handled by
		 * true layer 3 protocols.
		 */

		/* See if we are configured to re-write the VLAN header
		 * to make it look like ethernet...
		 */
		skb = vlan_check_reorder_header(skb);

		/* Can be null if skb-clone fails when re-ordering */
		if (skb) {
			netif_rx(skb);
		} else {
			/* TODO: Add a more specific counter here. */
			stats->rx_errors++;
		}
		rcu_read_unlock();
		return 0;
	}

	rawp = skb->data;

	/*
	 * This is a magic hack to spot IPX packets. Older Novell breaks
	 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 * won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF) {
		skb->protocol = __constant_htons(ETH_P_802_3);
		/* place it back on the queue to be handled by true layer 3 protocols.
		 */

		/* See if we are configured to re-write the VLAN header
		 * to make it look like ethernet...
		 */
		skb = vlan_check_reorder_header(skb);

		/* Can be null if skb-clone fails when re-ordering */
		if (skb) {
			netif_rx(skb);
		} else {
			/* TODO: Add a more specific counter here. */
			stats->rx_errors++;
		}
		rcu_read_unlock();
		return 0;
	}

	/*
	 * Real 802.2 LLC
	 */
	skb->protocol = __constant_htons(ETH_P_802_2);
	/* place it back on the queue to be handled by upper layer protocols.
	 */

	/* See if we are configured to re-write the VLAN header
	 * to make it look like ethernet...
	 */
	skb = vlan_check_reorder_header(skb);

	/* Can be null if skb-clone fails when re-ordering */
	if (skb) {
		netif_rx(skb);
	} else {
		/* TODO: Add a more specific counter here. */
		stats->rx_errors++;
	}
	rcu_read_unlock();
	return 0;
}

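/* Map skb->priority to the 802.1p priority bits configured for this VLAN
 * device (see vlan_dev_set_egress_priority()).  The returned value is
 * already shifted so it can simply be OR'd into the TCI.
 */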
static inline unsigned short vlan_dev_get_egress_qos_mask(struct net_device* dev,
							   struct sk_buff* skb)
{
	struct vlan_priority_tci_mapping *mp =
		VLAN_DEV_INFO(dev)->egress_priority_map[(skb->priority & 0xF)];

	while (mp) {
		if (mp->priority == skb->priority) {
			return mp->vlan_qos; /* This should already be shifted to mask
					      * correctly with the VLAN's TCI
					      */
		}
		mp = mp->next;
	}
	return 0;
}

/*
 *	Create the VLAN header for an arbitrary protocol layer
 *
 *	saddr=NULL	means use device source address
 *	daddr=NULL	means leave destination address (eg unresolved arp)
 *
 *  This is called when the SKB is moving down the stack towards the
 *  physical devices.
 */
int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
			 unsigned short type, void *daddr, void *saddr,
			 unsigned len)
{
	struct vlan_hdr *vhdr;
	unsigned short veth_TCI = 0;
	int rc = 0;
	int build_vlan_header = 0;
	struct net_device *vdev = dev; /* save this for the bottom of the method */

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: skb: %p type: %hx len: %x vlan_id: %hx, daddr: %p\n",
	       __FUNCTION__, skb, type, len, VLAN_DEV_INFO(dev)->vlan_id, daddr);
#endif

	/* build vlan header only if re_order_header flag is NOT set.  This
	 * fixes some programs that get confused when they see a VLAN device
	 * sending a frame that is VLAN encoded (the consensus is that the VLAN
	 * device should look completely like an Ethernet device when the
	 * REORDER_HEADER flag is set).  The drawback to this is some extra
	 * header shuffling in the hard_start_xmit.  Users can turn off this
	 * REORDER behaviour with the vconfig tool.
	 */
	build_vlan_header = ((VLAN_DEV_INFO(dev)->flags & 1) == 0);

	if (build_vlan_header) {
		vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);

		/* build the four bytes that make this a VLAN header. */

		/* Now, construct the second two bytes. This field looks something
		 * like:
		 * usr_priority: 3 bits	 (high bits)
		 * CFI		 1 bit
		 * VLAN ID	 12 bits (low bits)
		 *
		 */
		veth_TCI = VLAN_DEV_INFO(dev)->vlan_id;
		veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);

		vhdr->h_vlan_TCI = htons(veth_TCI);

		/*
		 *  Set the protocol type.
		 *  For a packet of type ETH_P_802_3 we put the length in here instead.
		 *  It is up to the 802.2 layer to carry protocol information.
		 */

		if (type != ETH_P_802_3) {
			vhdr->h_vlan_encapsulated_proto = htons(type);
		} else {
			vhdr->h_vlan_encapsulated_proto = htons(len);
		}
	}

	/* Before delegating work to the lower layer, enter our MAC-address */
	if (saddr == NULL)
		saddr = dev->dev_addr;

	dev = VLAN_DEV_INFO(dev)->real_dev;

	/* MPLS can send us skbuffs w/out enough space.	 This check will grow the
	 * skb if it doesn't have enough headroom.  Not a beautiful solution, so
	 * I'll tick a counter so that users can know it's happening...  If they
	 * care...
	 */

	/* NOTE: This may still break if the underlying device is not the final
	 * device (and thus there are more headers to add...)  It should work for
	 * good-ole-ethernet though.
	 */
	if (skb_headroom(skb) < dev->hard_header_len) {
		struct sk_buff *sk_tmp = skb;
		skb = skb_realloc_headroom(sk_tmp, dev->hard_header_len);
		kfree_skb(sk_tmp);
		if (skb == NULL) {
			struct net_device_stats *stats = vlan_dev_get_stats(vdev);
			stats->tx_dropped++;
			return -ENOMEM;
		}
		VLAN_DEV_INFO(vdev)->cnt_inc_headroom_on_tx++;
#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: %s: had to grow skb.\n", __FUNCTION__, vdev->name);
#endif
	}

	if (build_vlan_header) {
		/* Now make the underlying real hard header */
		rc = dev->hard_header(skb, dev, ETH_P_8021Q, daddr, saddr, len + VLAN_HLEN);

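		/* hard_header() reports how many header bytes it built (a
		 * negative length means the header could not be completed),
		 * so account for the 4-byte VLAN header pushed above as well.
		 */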
		if (rc > 0) {
			rc += VLAN_HLEN;
		} else if (rc < 0) {
			rc -= VLAN_HLEN;
		}
	} else {
		/* If here, then we'll just make a normal looking ethernet frame,
		 * but the hard_start_xmit method will insert the tag (it has to
		 * be able to do this for bridged and other skbs that don't come
		 * down the protocol stack in an orderly manner).
		 */
		rc = dev->hard_header(skb, dev, type, daddr, saddr, len);
	}

	return rc;
}

int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *stats = vlan_dev_get_stats(dev);
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);

	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
	 *
	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
	 */

	if (veth->h_vlan_proto != __constant_htons(ETH_P_8021Q)) {
		int orig_headroom = skb_headroom(skb);
		unsigned short veth_TCI;

		/* This is not a VLAN frame...but we can fix that! */
		VLAN_DEV_INFO(dev)->cnt_encap_on_xmit++;

#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: proto to encap: 0x%hx (hbo)\n",
		       __FUNCTION__, htons(veth->h_vlan_proto));
#endif
		/* Construct the second two bytes. This field looks something
		 * like:
		 * usr_priority: 3 bits	 (high bits)
		 * CFI		 1 bit
		 * VLAN ID	 12 bits (low bits)
		 */
		veth_TCI = VLAN_DEV_INFO(dev)->vlan_id;
		veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);

		skb = __vlan_put_tag(skb, veth_TCI);
		if (!skb) {
			stats->tx_dropped++;
			return 0;
		}

		if (orig_headroom < VLAN_HLEN) {
			VLAN_DEV_INFO(dev)->cnt_inc_headroom_on_tx++;
		}
	}

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: about to send skb: %p to dev: %s\n",
	       __FUNCTION__, skb, skb->dev->name);
	printk(VLAN_DBG "  %2hx.%2hx.%2hx.%2hx.%2hx.%2hx %2hx.%2hx.%2hx.%2hx.%2hx.%2hx %4hx %4hx %4hx\n",
	       veth->h_dest[0], veth->h_dest[1], veth->h_dest[2], veth->h_dest[3], veth->h_dest[4], veth->h_dest[5],
	       veth->h_source[0], veth->h_source[1], veth->h_source[2], veth->h_source[3], veth->h_source[4], veth->h_source[5],
	       veth->h_vlan_proto, veth->h_vlan_TCI, veth->h_vlan_encapsulated_proto);
#endif

	stats->tx_packets++; /* for statistics only */
	stats->tx_bytes += skb->len;

	skb->dev = VLAN_DEV_INFO(dev)->real_dev;
	dev_queue_xmit(skb);

	return 0;
}

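/* Transmit path used when the underlying device can insert the VLAN tag in
 * hardware: the tag is handed to the driver out-of-band via
 * __vlan_hwaccel_put_tag() instead of being written into the frame data.
 */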
int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *stats = vlan_dev_get_stats(dev);
	unsigned short veth_TCI;

	/* Construct the second two bytes. This field looks something
	 * like:
	 * usr_priority: 3 bits	 (high bits)
	 * CFI		 1 bit
	 * VLAN ID	 12 bits (low bits)
	 */
	veth_TCI = VLAN_DEV_INFO(dev)->vlan_id;
	veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);
	skb = __vlan_hwaccel_put_tag(skb, veth_TCI);

	stats->tx_packets++;
	stats->tx_bytes += skb->len;

	skb->dev = VLAN_DEV_INFO(dev)->real_dev;
	dev_queue_xmit(skb);

	return 0;
}

int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
{
	/* TODO: gotta make sure the underlying layer can handle it,
	 * maybe an IFF_VLAN_CAPABLE flag for devices?
	 */
	if (VLAN_DEV_INFO(dev)->real_dev->mtu < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}

int vlan_dev_set_ingress_priority(char *dev_name, __u32 skb_prio, short vlan_prio)
{
	struct net_device *dev = dev_get_by_name(dev_name);

	if (dev) {
		if (dev->priv_flags & IFF_802_1Q_VLAN) {
			/* see if a priority mapping exists.. */
			VLAN_DEV_INFO(dev)->ingress_priority_map[vlan_prio & 0x7] = skb_prio;
			dev_put(dev);
			return 0;
		}

		dev_put(dev);
	}
	return -EINVAL;
}

int vlan_dev_set_egress_priority(char *dev_name, __u32 skb_prio, short vlan_prio)
{
	struct net_device *dev = dev_get_by_name(dev_name);
	struct vlan_priority_tci_mapping *mp = NULL;
	struct vlan_priority_tci_mapping *np;

	if (dev) {
		if (dev->priv_flags & IFF_802_1Q_VLAN) {
			/* See if a priority mapping exists.. */
			mp = VLAN_DEV_INFO(dev)->egress_priority_map[skb_prio & 0xF];
			while (mp) {
				if (mp->priority == skb_prio) {
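					/* Store the 802.1p priority in the top
					 * three bits (the PCP field) of the TCI.
					 */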
					mp->vlan_qos = ((vlan_prio << 13) & 0xE000);
					dev_put(dev);
					return 0;
				}
				mp = mp->next;
			}

			/* Create a new mapping then. */
			mp = VLAN_DEV_INFO(dev)->egress_priority_map[skb_prio & 0xF];
			np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
			if (np) {
				np->next = mp;
				np->priority = skb_prio;
				np->vlan_qos = ((vlan_prio << 13) & 0xE000);
				VLAN_DEV_INFO(dev)->egress_priority_map[skb_prio & 0xF] = np;
				dev_put(dev);
				return 0;
			} else {
				dev_put(dev);
				return -ENOBUFS;
			}
		}
		dev_put(dev);
	}
	return -EINVAL;
}

/* Flags are defined in struct vlan_dev_info in the include/linux/if_vlan.h file. */
int vlan_dev_set_vlan_flag(char *dev_name, __u32 flag, short flag_val)
{
	struct net_device *dev = dev_get_by_name(dev_name);

	if (dev) {
		if (dev->priv_flags & IFF_802_1Q_VLAN) {
			/* verify flag is supported */
			if (flag == 1) {
				if (flag_val) {
					VLAN_DEV_INFO(dev)->flags |= 1;
				} else {
					VLAN_DEV_INFO(dev)->flags &= ~1;
				}
				dev_put(dev);
				return 0;
			} else {
				printk(KERN_ERR "%s: flag %i is not valid.\n",
				       __FUNCTION__, (int)(flag));
				dev_put(dev);
				return -EINVAL;
			}
		} else {
			printk(KERN_ERR
			       "%s: %s is not a vlan device, priv_flags: %hX.\n",
			       __FUNCTION__, dev->name, dev->priv_flags);
			dev_put(dev);
		}
	} else {
		printk(KERN_ERR "%s: Could not find device: %s\n",
		       __FUNCTION__, dev_name);
	}

	return -EINVAL;
}


int vlan_dev_get_realdev_name(const char *dev_name, char *result)
{
	struct net_device *dev = dev_get_by_name(dev_name);
	int rv = 0;
	if (dev) {
		if (dev->priv_flags & IFF_802_1Q_VLAN) {
			strncpy(result, VLAN_DEV_INFO(dev)->real_dev->name, 23);
			rv = 0;
		} else {
			rv = -EINVAL;
		}
		dev_put(dev);
	} else {
		rv = -ENODEV;
	}
	return rv;
}

int vlan_dev_get_vid(const char *dev_name, unsigned short *result)
{
	struct net_device *dev = dev_get_by_name(dev_name);
	int rv = 0;
	if (dev) {
		if (dev->priv_flags & IFF_802_1Q_VLAN) {
			*result = VLAN_DEV_INFO(dev)->vlan_id;
			rv = 0;
		} else {
			rv = -EINVAL;
		}
		dev_put(dev);
	} else {
		rv = -ENODEV;
	}
	return rv;
}


int vlan_dev_set_mac_address(struct net_device *dev, void *addr_struct_p)
{
	struct sockaddr *addr = (struct sockaddr *)(addr_struct_p);
	int i;

	if (netif_running(dev))
		return -EBUSY;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	printk("%s: Setting MAC address to ", dev->name);
	for (i = 0; i < 6; i++)
		printk(" %2.2x", dev->dev_addr[i]);
	printk(".\n");

	if (memcmp(VLAN_DEV_INFO(dev)->real_dev->dev_addr,
		   dev->dev_addr,
		   dev->addr_len) != 0) {
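		/* The VLAN device now has a different MAC than its underlying
		 * device, so put the real device into promiscuous mode (if it
		 * is not already) so that frames sent to the new address are
		 * still delivered to us.
		 */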
		if (!(VLAN_DEV_INFO(dev)->real_dev->flags & IFF_PROMISC)) {
			int flgs = VLAN_DEV_INFO(dev)->real_dev->flags;

			/* Increment our in-use promiscuity counter */
			dev_set_promiscuity(VLAN_DEV_INFO(dev)->real_dev, 1);

			/* Make PROMISC visible to the user. */
			flgs |= IFF_PROMISC;
			printk("VLAN (%s): Setting underlying device (%s) to promiscuous mode.\n",
			       dev->name, VLAN_DEV_INFO(dev)->real_dev->name);
			dev_change_flags(VLAN_DEV_INFO(dev)->real_dev, flgs);
		}
	} else {
		printk("VLAN (%s): Underlying device (%s) has same MAC, not checking promiscuous mode.\n",
		       dev->name, VLAN_DEV_INFO(dev)->real_dev->name);
	}

	return 0;
}

static inline int vlan_dmi_equals(struct dev_mc_list *dmi1,
				  struct dev_mc_list *dmi2)
{
	return ((dmi1->dmi_addrlen == dmi2->dmi_addrlen) &&
		(memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0));
}

/** dmi is a single entry into a dev_mc_list, a single node.  mc_list is
 *  an entire list, and we'll iterate through it.
 */
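/* Returns nonzero when dmi is missing from mc_list, or is referenced more
 * times than the matching entry in mc_list; callers use this to decide which
 * addresses need to be added to (or removed from) the underlying device.
 */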
static int vlan_should_add_mc(struct dev_mc_list *dmi, struct dev_mc_list *mc_list)
{
	struct dev_mc_list *idmi;

	for (idmi = mc_list; idmi != NULL; ) {
		if (vlan_dmi_equals(dmi, idmi)) {
			if (dmi->dmi_users > idmi->dmi_users)
				return 1;
			else
				return 0;
		} else {
			idmi = idmi->next;
		}
	}

	return 1;
}

static inline void vlan_destroy_mc_list(struct dev_mc_list *mc_list)
{
	struct dev_mc_list *dmi = mc_list;
	struct dev_mc_list *next;

	while (dmi) {
		next = dmi->next;
		kfree(dmi);
		dmi = next;
	}
}

static void vlan_copy_mc_list(struct dev_mc_list *mc_list, struct vlan_dev_info *vlan_info)
{
	struct dev_mc_list *dmi, *new_dmi;

	vlan_destroy_mc_list(vlan_info->old_mc_list);
	vlan_info->old_mc_list = NULL;

	for (dmi = mc_list; dmi != NULL; dmi = dmi->next) {
		new_dmi = kmalloc(sizeof(*new_dmi), GFP_ATOMIC);
		if (new_dmi == NULL) {
			printk(KERN_ERR "vlan: cannot allocate memory. "
			       "Multicast may not work properly from now on.\n");
			return;
		}

		/* Copy whole structure, then make new 'next' pointer */
		*new_dmi = *dmi;
		new_dmi->next = vlan_info->old_mc_list;
		vlan_info->old_mc_list = new_dmi;
	}
}

static void vlan_flush_mc_list(struct net_device *dev)
{
	struct dev_mc_list *dmi = dev->mc_list;

	while (dmi) {
		printk(KERN_DEBUG "%s: del %.2x:%.2x:%.2x:%.2x:%.2x:%.2x mcast address from vlan interface\n",
		       dev->name,
		       dmi->dmi_addr[0],
		       dmi->dmi_addr[1],
		       dmi->dmi_addr[2],
		       dmi->dmi_addr[3],
		       dmi->dmi_addr[4],
		       dmi->dmi_addr[5]);
		dev_mc_delete(dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
		dmi = dev->mc_list;
	}

	/* dev->mc_list is NULL by the time we get here. */
	vlan_destroy_mc_list(VLAN_DEV_INFO(dev)->old_mc_list);
	VLAN_DEV_INFO(dev)->old_mc_list = NULL;
}

int vlan_dev_open(struct net_device *dev)
{
	if (!(VLAN_DEV_INFO(dev)->real_dev->flags & IFF_UP))
		return -ENETDOWN;

	return 0;
}

int vlan_dev_stop(struct net_device *dev)
{
	vlan_flush_mc_list(dev);
	return 0;
}

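/* Pass hardware-specific ioctls (MII register access and ethtool) through to
 * the underlying real device, copying the result back to the caller.
 */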
int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct net_device *real_dev = VLAN_DEV_INFO(dev)->real_dev;
	struct ifreq ifrr;
	int err = -EOPNOTSUPP;

	strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
	ifrr.ifr_ifru = ifr->ifr_ifru;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (real_dev->do_ioctl && netif_device_present(real_dev))
			err = real_dev->do_ioctl(real_dev, &ifrr, cmd);
		break;

	case SIOCETHTOOL:
		err = dev_ethtool(&ifrr);
	}

	if (!err)
		ifr->ifr_ifru = ifrr.ifr_ifru;

	return err;
}

/** Taken from Gleb + Lennert's VLAN code, and modified... */
void vlan_dev_set_multicast_list(struct net_device *vlan_dev)
{
	struct dev_mc_list *dmi;
	struct net_device *real_dev;
	int inc;

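	/* Propagate the VLAN device's promiscuity and allmulti counts, and
	 * any changes to its multicast address list, down to the underlying
	 * real device.
	 */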
	if (vlan_dev && (vlan_dev->priv_flags & IFF_802_1Q_VLAN)) {
		/* Then it's a real vlan device, as far as we can tell.. */
		real_dev = VLAN_DEV_INFO(vlan_dev)->real_dev;

		/* compare the current promiscuity to the last promisc we had.. */
		inc = vlan_dev->promiscuity - VLAN_DEV_INFO(vlan_dev)->old_promiscuity;
		if (inc) {
			printk(KERN_INFO "%s: dev_set_promiscuity(master, %d)\n",
			       vlan_dev->name, inc);
			dev_set_promiscuity(real_dev, inc); /* found in dev.c */
			VLAN_DEV_INFO(vlan_dev)->old_promiscuity = vlan_dev->promiscuity;
		}

		inc = vlan_dev->allmulti - VLAN_DEV_INFO(vlan_dev)->old_allmulti;
		if (inc) {
			printk(KERN_INFO "%s: dev_set_allmulti(master, %d)\n",
			       vlan_dev->name, inc);
			dev_set_allmulti(real_dev, inc); /* dev.c */
			VLAN_DEV_INFO(vlan_dev)->old_allmulti = vlan_dev->allmulti;
		}

		/* looking for addresses to add to master's list */
		for (dmi = vlan_dev->mc_list; dmi != NULL; dmi = dmi->next) {
			if (vlan_should_add_mc(dmi, VLAN_DEV_INFO(vlan_dev)->old_mc_list)) {
				dev_mc_add(real_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
				printk(KERN_DEBUG "%s: add %.2x:%.2x:%.2x:%.2x:%.2x:%.2x mcast address to master interface\n",
				       vlan_dev->name,
				       dmi->dmi_addr[0],
				       dmi->dmi_addr[1],
				       dmi->dmi_addr[2],
				       dmi->dmi_addr[3],
				       dmi->dmi_addr[4],
				       dmi->dmi_addr[5]);
			}
		}

		/* looking for addresses to delete from master's list */
		for (dmi = VLAN_DEV_INFO(vlan_dev)->old_mc_list; dmi != NULL; dmi = dmi->next) {
			if (vlan_should_add_mc(dmi, vlan_dev->mc_list)) {
				/* if we think we should add it to the new list, then we should really
				 * delete it from the real list on the underlying device.
				 */
				dev_mc_delete(real_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
				printk(KERN_DEBUG "%s: del %.2x:%.2x:%.2x:%.2x:%.2x:%.2x mcast address from master interface\n",
				       vlan_dev->name,
				       dmi->dmi_addr[0],
				       dmi->dmi_addr[1],
				       dmi->dmi_addr[2],
				       dmi->dmi_addr[3],
				       dmi->dmi_addr[4],
				       dmi->dmi_addr[5]);
			}
		}

		/* save multicast list */
		vlan_copy_mc_list(vlan_dev->mc_list, VLAN_DEV_INFO(vlan_dev));
	}
}