/* -*- linux-c -*-
 * INET		802.1Q VLAN
 *		Ethernet-type device handling.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 *		Please send support related email to: vlan@scry.wanfear.com
 *		VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * Fixes:	Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com>
 *		- reset skb->pkt_type on incoming packets when MAC was changed
 *		- see that changed MAC is saddr for outgoing packets
 *		Oct 20, 2001: Ard van Breeman:
 *		- Fix MC-list, finally.
 *		- Flush MC-list on VLAN destroy.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/in.h>
#include <linux/init.h>
#include <asm/uaccess.h> /* for copy_from_user */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/datalink.h>
#include <net/p8022.h>
#include <net/arp.h>

#include "vlan.h"
#include "vlanproc.h"
#include <linux/if_vlan.h>
#include <net/ip.h>

/*
 *	Rebuild the Ethernet MAC header.  This is called after an ARP
 *	(or in future other address resolution) has completed on this
 *	sk_buff.  We now let ARP fill in the other fields.
 *
 *	This routine CANNOT use cached dst->neigh!
 *	Really, it is used only when dst->neigh is wrong.
 *
 *	TODO:  This needs a checkup, I'm ignorant here. --BLG
 */
int vlan_dev_rebuild_header(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);

	switch (veth->h_vlan_encapsulated_proto) {
#ifdef CONFIG_INET
	case __constant_htons(ETH_P_IP):

		/* TODO:  Confirm this will work with VLAN headers... */
		return arp_find(veth->h_dest, skb);
#endif
	default:
		printk(VLAN_DBG
		       "%s: unable to resolve type %X addresses.\n",
		       dev->name, (int)veth->h_vlan_encapsulated_proto);

		memcpy(veth->h_source, dev->dev_addr, ETH_ALEN);
		break;
	}

	return 0;
}
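/* If the REORDER_HDR flag (bit 0 of the vlan device's flags) is set, rewrite
 * the received frame so it looks like an ordinary Ethernet frame: the 12 bytes
 * moved below are the destination and source MAC addresses (2 * ETH_ALEN),
 * slid up over the 4-byte 802.1Q tag that vlan_skb_recv() has already
 * skb_pull()ed past.
 */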
static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
{
	if (VLAN_DEV_INFO(skb->dev)->flags & 1) {
		if (skb_shared(skb) || skb_cloned(skb)) {
			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
			kfree_skb(skb);
			skb = nskb;
		}
		if (skb) {
			/* Lifted from Gleb's VLAN code... */
			memmove(skb->data - ETH_HLEN,
				skb->data - VLAN_ETH_HLEN, 12);
			skb->mac.raw += VLAN_HLEN;
		}
	}

	return skb;
}

/*
 *	Determine the packet's protocol ID.  The rule here is that we
 *	assume 802.3 if the type field is short enough to be a length.
 *	This is normal practice and works for any 'now in use' protocol.
 *
 *	Also, at this point we assume that we ARE dealing exclusively with
 *	VLAN packets, or packets that should be made into VLAN packets based
 *	on a default VLAN ID.
 *
 *	NOTE: Should be similar to ethernet/eth.c.
 *
 *	SANITY NOTE:  This method is called when a packet is moving up the stack
 *		      towards userland.  To get here, it would have already passed
 *		      through the ethernet/eth.c eth_type_trans() method.
 *	SANITY NOTE 2: We are referencing the VLAN_HDR fields, which MAY be
 *		       stored UNALIGNED in memory.  RISC systems don't like
 *		       such cases very much...
 *	SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be
 *			aligned, so there doesn't need to be any of the unaligned
 *			stuff.  It has been commented out now...  --Ben
 *
 */
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *ptype, struct net_device *orig_dev)
{
	unsigned char *rawp = NULL;
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data);
	unsigned short vid;
	struct net_device_stats *stats;
	unsigned short vlan_TCI;
	unsigned short proto;

	/* vlan_TCI = ntohs(get_unaligned(&vhdr->h_vlan_TCI)); */
	vlan_TCI = ntohs(vhdr->h_vlan_TCI);

	vid = (vlan_TCI & VLAN_VID_MASK);

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: skb: %p vlan_id: %hx\n",
	       __FUNCTION__, skb, vid);
#endif

	/* Ok, we will find the correct VLAN device, strip the header,
	 * and then go on as usual.
	 */

	/* We have 12 bits of vlan ID.
	 *
	 * We must not leave the RCU read-side critical section (which also
	 * disables preemption) until we hold a reference to the device
	 * (netif_rx does that) or we fail.
	 */

	rcu_read_lock();
	skb->dev = __find_vlan_dev(dev, vid);
	if (!skb->dev) {
		rcu_read_unlock();

#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: ERROR: No net_device for VID: %i on dev: %s [%i]\n",
		       __FUNCTION__, (unsigned int)(vid), dev->name, dev->ifindex);
#endif
		kfree_skb(skb);
		return -1;
	}

	skb->dev->last_rx = jiffies;

	/* Bump the rx counters for the VLAN device. */
	stats = vlan_dev_get_stats(skb->dev);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	skb_pull(skb, VLAN_HLEN); /* take off the VLAN header (4 bytes currently) */

	/* Ok, let's check to make sure the device (dev) we
	 * came in on is what this VLAN is attached to.
	 */

	if (dev != VLAN_DEV_INFO(skb->dev)->real_dev) {
		rcu_read_unlock();

#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: dropping skb: %p because it came in on the wrong device, dev: %s real_dev: %s, skb_dev: %s\n",
		       __FUNCTION__, skb, dev->name,
		       VLAN_DEV_INFO(skb->dev)->real_dev->name,
		       skb->dev->name);
#endif
		kfree_skb(skb);
		stats->rx_errors++;
		return -1;
	}

	/*
	 * Deal with ingress priority mapping.
	 */
	skb->priority = vlan_get_ingress_priority(skb->dev, ntohs(vhdr->h_vlan_TCI));

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: priority: %lu for TCI: %hu (hbo)\n",
	       __FUNCTION__, (unsigned long)(skb->priority),
	       ntohs(vhdr->h_vlan_TCI));
#endif

	/* The ethernet driver already did the pkt_type calculations
	 * for us...
	 */
	switch (skb->pkt_type) {
	case PACKET_BROADCAST: /* Yeah, stats collect these together.. */
		// stats->broadcast ++; // no such counter :-(
		break;

	case PACKET_MULTICAST:
		stats->multicast++;
		break;

	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the underlying
		 * device, and still route correctly.
		 */
		if (memcmp(eth_hdr(skb)->h_dest, skb->dev->dev_addr, ETH_ALEN) == 0) {
			/* It is for our (changed) MAC address! */
			skb->pkt_type = PACKET_HOST;
		}
		break;
	default:
		break;
	}

	/* This was a VLAN packet; grab the encapsulated protocol, which the
	 * layer three protocols care about.
	 */
	/* proto = get_unaligned(&vhdr->h_vlan_encapsulated_proto); */
	proto = vhdr->h_vlan_encapsulated_proto;

	skb->protocol = proto;
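	/* EtherType values start at 1536 (0x0600); anything smaller in this
	 * field is an 802.3 length, handled by the LLC/IPX cases below.
	 */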
	if (ntohs(proto) >= 1536) {
		/* place it back on the queue to be handled by
		 * true layer 3 protocols.
		 */

		/* See if we are configured to re-write the VLAN header
		 * to make it look like ethernet...
		 */
		skb = vlan_check_reorder_header(skb);

		/* Can be NULL if the copy in vlan_check_reorder_header() failed */
		if (skb) {
			netif_rx(skb);
		} else {
			/* TODO: Add a more specific counter here. */
			stats->rx_errors++;
		}
		rcu_read_unlock();
		return 0;
	}

	rawp = skb->data;

	/*
	 * This is a magic hack to spot IPX packets. Older Novell breaks
	 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 * won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF) {
		skb->protocol = __constant_htons(ETH_P_802_3);
		/* place it back on the queue to be handled by true layer 3 protocols.
		 */

		/* See if we are configured to re-write the VLAN header
		 * to make it look like ethernet...
		 */
		skb = vlan_check_reorder_header(skb);

		/* Can be NULL if the copy in vlan_check_reorder_header() failed */
		if (skb) {
			netif_rx(skb);
		} else {
			/* TODO: Add a more specific counter here. */
			stats->rx_errors++;
		}
		rcu_read_unlock();
		return 0;
	}

	/*
	 *	Real 802.2 LLC
	 */
	skb->protocol = __constant_htons(ETH_P_802_2);
	/* place it back on the queue to be handled by upper layer protocols.
	 */

	/* See if we are configured to re-write the VLAN header
	 * to make it look like ethernet...
	 */
	skb = vlan_check_reorder_header(skb);

	/* Can be NULL if the copy in vlan_check_reorder_header() failed */
	if (skb) {
		netif_rx(skb);
	} else {
		/* TODO: Add a more specific counter here. */
		stats->rx_errors++;
	}
	rcu_read_unlock();
	return 0;
}

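/* The egress priority map is a small hash table: 16 buckets indexed by the low
 * nibble of skb->priority, each holding a list of exact skb->priority matches.
 * The stored vlan_qos is already shifted into the TCI priority bits (see
 * vlan_dev_set_egress_priority() below), so it can simply be OR'd into the tag.
 */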
static inline unsigned short vlan_dev_get_egress_qos_mask(struct net_device *dev,
							   struct sk_buff *skb)
{
	struct vlan_priority_tci_mapping *mp =
		VLAN_DEV_INFO(dev)->egress_priority_map[(skb->priority & 0xF)];

	while (mp) {
		if (mp->priority == skb->priority) {
			return mp->vlan_qos; /* This should already be shifted to mask
					      * correctly with the VLAN's TCI
					      */
		}
		mp = mp->next;
	}
	return 0;
}

/*
 *	Create the VLAN header for an arbitrary protocol layer
 *
 *	saddr=NULL	means use device source address
 *	daddr=NULL	means leave destination address (eg unresolved arp)
 *
 *	This is called when the SKB is moving down the stack towards the
 *	physical devices.
 */
int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
			 unsigned short type, void *daddr, void *saddr,
			 unsigned len)
{
	struct vlan_hdr *vhdr;
	unsigned short veth_TCI = 0;
	int rc = 0;
	int build_vlan_header = 0;
	struct net_device *vdev = dev; /* save this for the bottom of the method */

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: skb: %p type: %hx len: %x vlan_id: %hx, daddr: %p\n",
	       __FUNCTION__, skb, type, len, VLAN_DEV_INFO(dev)->vlan_id, daddr);
#endif

	/* Build the VLAN header only if the re_order_header flag is NOT set.
	 * This fixes some programs that get confused when they see a VLAN
	 * device sending a frame that is VLAN encoded (the consensus is that
	 * the VLAN device should look completely like an Ethernet device when
	 * the REORDER_HEADER flag is set).  The drawback to this is some extra
	 * header shuffling in hard_start_xmit.  Users can turn off this
	 * REORDER behaviour with the vconfig tool.
	 */
	build_vlan_header = ((VLAN_DEV_INFO(dev)->flags & 1) == 0);

	if (build_vlan_header) {
		vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);

		/* build the four bytes that make this a VLAN header. */

		/* Now, construct the second two bytes. This field looks something
		 * like:
		 * usr_priority: 3 bits (high bits)
		 * CFI		 1 bit
		 * VLAN ID	 12 bits (low bits)
		 *
		 */
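		/* For example, a frame whose priority maps to qos 0xA000
		 * (802.1p priority 5) on VLAN 100 yields TCI 0xA064.
		 */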
		veth_TCI = VLAN_DEV_INFO(dev)->vlan_id;
		veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);

		vhdr->h_vlan_TCI = htons(veth_TCI);

		/*
		 * Set the protocol type.
		 * For a packet of type ETH_P_802_3 we put the length in here instead.
		 * It is up to the 802.2 layer to carry protocol information.
		 */

		if (type != ETH_P_802_3) {
			vhdr->h_vlan_encapsulated_proto = htons(type);
		} else {
			vhdr->h_vlan_encapsulated_proto = htons(len);
		}
	}

	/* Before delegating work to the lower layer, enter our MAC address */
	if (saddr == NULL)
		saddr = dev->dev_addr;

	dev = VLAN_DEV_INFO(dev)->real_dev;

	/* MPLS can send us skbuffs without enough space.  This check will grow
	 * the skb if it doesn't have enough headroom.  Not a beautiful solution,
	 * so we bump a counter so that users can know it's happening, if they
	 * care.
	 */

	/* NOTE: This may still break if the underlying device is not the final
	 * device (and thus there are more headers to add...)  It should work for
	 * good-ole-ethernet though.
	 */
	if (skb_headroom(skb) < dev->hard_header_len) {
		struct sk_buff *sk_tmp = skb;
		skb = skb_realloc_headroom(sk_tmp, dev->hard_header_len);
		kfree_skb(sk_tmp);
		if (skb == NULL) {
			struct net_device_stats *stats = vlan_dev_get_stats(vdev);
			stats->tx_dropped++;
			return -ENOMEM;
		}
		VLAN_DEV_INFO(vdev)->cnt_inc_headroom_on_tx++;
#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: %s: had to grow skb.\n", __FUNCTION__, vdev->name);
#endif
	}

	if (build_vlan_header) {
		/* Now make the underlying real hard header */
		rc = dev->hard_header(skb, dev, ETH_P_8021Q, daddr, saddr, len + VLAN_HLEN);

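		/* hard_header() reports how many bytes of header it built
		 * (negative when the destination could not be filled in yet),
		 * so fold in the VLAN_HLEN bytes we pushed ourselves above.
		 */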
		if (rc > 0) {
			rc += VLAN_HLEN;
		} else if (rc < 0) {
			rc -= VLAN_HLEN;
		}
	} else {
		/* If we get here, we'll just make a normal looking ethernet
		 * frame, but the hard_start_xmit method will insert the tag
		 * (it has to be able to do this for bridged and other skbs
		 * that don't come down the protocol stack in an orderly manner).
		 */
		rc = dev->hard_header(skb, dev, type, daddr, saddr, len);
	}

	return rc;
}

int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *stats = vlan_dev_get_stats(dev);
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);

	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
	 *
	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
	 */

	if (veth->h_vlan_proto != __constant_htons(ETH_P_8021Q)) {
		int orig_headroom = skb_headroom(skb);
		unsigned short veth_TCI;

		/* This is not a VLAN frame...but we can fix that! */
		VLAN_DEV_INFO(dev)->cnt_encap_on_xmit++;

#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: proto to encap: 0x%hx (hbo)\n",
		       __FUNCTION__, htons(veth->h_vlan_proto));
#endif
		/* Construct the second two bytes. This field looks something
		 * like:
		 * usr_priority: 3 bits (high bits)
		 * CFI		 1 bit
		 * VLAN ID	 12 bits (low bits)
		 */
		veth_TCI = VLAN_DEV_INFO(dev)->vlan_id;
		veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);

		skb = __vlan_put_tag(skb, veth_TCI);
		if (!skb) {
			stats->tx_dropped++;
			return 0;
		}

		if (orig_headroom < VLAN_HLEN) {
			VLAN_DEV_INFO(dev)->cnt_inc_headroom_on_tx++;
		}
	}

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: about to send skb: %p to dev: %s\n",
	       __FUNCTION__, skb, skb->dev->name);
	printk(VLAN_DBG "  %2hx.%2hx.%2hx.%2hx.%2hx.%2hx %2hx.%2hx.%2hx.%2hx.%2hx.%2hx %4hx %4hx %4hx\n",
	       veth->h_dest[0], veth->h_dest[1], veth->h_dest[2], veth->h_dest[3], veth->h_dest[4], veth->h_dest[5],
	       veth->h_source[0], veth->h_source[1], veth->h_source[2], veth->h_source[3], veth->h_source[4], veth->h_source[5],
	       veth->h_vlan_proto, veth->h_vlan_TCI, veth->h_vlan_encapsulated_proto);
#endif

	stats->tx_packets++; /* for statistics only */
	stats->tx_bytes += skb->len;

	skb->dev = VLAN_DEV_INFO(dev)->real_dev;
	dev_queue_xmit(skb);

	return 0;
}

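/* Transmit path used when the underlying device does 802.1Q tag insertion in
 * hardware: the TCI is only stashed on the skb via __vlan_hwaccel_put_tag(),
 * and the real device's driver emits the tag itself.
 */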
int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *stats = vlan_dev_get_stats(dev);
	unsigned short veth_TCI;

	/* Construct the second two bytes. This field looks something
	 * like:
	 * usr_priority: 3 bits (high bits)
	 * CFI		 1 bit
	 * VLAN ID	 12 bits (low bits)
	 */
	veth_TCI = VLAN_DEV_INFO(dev)->vlan_id;
	veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);
	skb = __vlan_hwaccel_put_tag(skb, veth_TCI);

	stats->tx_packets++;
	stats->tx_bytes += skb->len;

	skb->dev = VLAN_DEV_INFO(dev)->real_dev;
	dev_queue_xmit(skb);

	return 0;
}

int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
{
	/* TODO: gotta make sure the underlying layer can handle it,
	 * maybe an IFF_VLAN_CAPABLE flag for devices?
	 */
	if (VLAN_DEV_INFO(dev)->real_dev->mtu < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}

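/* Ingress mapping: the 3-bit 802.1p priority taken from a received tag indexes
 * an 8-entry table that yields the skb->priority to assign (see the
 * vlan_get_ingress_priority() call in vlan_skb_recv()).
 */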
int vlan_dev_set_ingress_priority(char *dev_name, __u32 skb_prio, short vlan_prio)
{
	struct net_device *dev = dev_get_by_name(dev_name);

	if (dev) {
		if (dev->priv_flags & IFF_802_1Q_VLAN) {
			/* the 3-bit vlan priority indexes the 8-entry map directly */
			VLAN_DEV_INFO(dev)->ingress_priority_map[vlan_prio & 0x7] = skb_prio;
			dev_put(dev);
			return 0;
		}

		dev_put(dev);
	}
	return -EINVAL;
}

int vlan_dev_set_egress_priority(char *dev_name, __u32 skb_prio, short vlan_prio)
{
	struct net_device *dev = dev_get_by_name(dev_name);
	struct vlan_priority_tci_mapping *mp = NULL;
	struct vlan_priority_tci_mapping *np;

	if (dev) {
		if (dev->priv_flags & IFF_802_1Q_VLAN) {
			/* See if a priority mapping exists.. */
			mp = VLAN_DEV_INFO(dev)->egress_priority_map[skb_prio & 0xF];
			while (mp) {
				if (mp->priority == skb_prio) {
					mp->vlan_qos = ((vlan_prio << 13) & 0xE000);
					dev_put(dev);
					return 0;
				}
				mp = mp->next;
			}

			/* Create a new mapping then. */
			mp = VLAN_DEV_INFO(dev)->egress_priority_map[skb_prio & 0xF];
			np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
			if (np) {
				np->next = mp;
				np->priority = skb_prio;
				np->vlan_qos = ((vlan_prio << 13) & 0xE000);
				VLAN_DEV_INFO(dev)->egress_priority_map[skb_prio & 0xF] = np;
				dev_put(dev);
				return 0;
			} else {
				dev_put(dev);
				return -ENOBUFS;
			}
		}
		dev_put(dev);
	}
	return -EINVAL;
}

/* Flags are defined in the vlan_dev_info struct in include/linux/if_vlan.h */
int vlan_dev_set_vlan_flag(char *dev_name, __u32 flag, short flag_val)
{
	struct net_device *dev = dev_get_by_name(dev_name);

	if (dev) {
		if (dev->priv_flags & IFF_802_1Q_VLAN) {
			/* verify the flag is supported: only bit 0 (header reorder) for now */
			if (flag == 1) {
				if (flag_val) {
					VLAN_DEV_INFO(dev)->flags |= 1;
				} else {
					VLAN_DEV_INFO(dev)->flags &= ~1;
				}
				dev_put(dev);
				return 0;
			} else {
				printk(KERN_ERR "%s: flag %i is not valid.\n",
				       __FUNCTION__, (int)(flag));
				dev_put(dev);
				return -EINVAL;
			}
		} else {
			printk(KERN_ERR
			       "%s: %s is not a vlan device, priv_flags: %hX.\n",
			       __FUNCTION__, dev->name, dev->priv_flags);
			dev_put(dev);
		}
	} else {
		printk(KERN_ERR "%s: Could not find device: %s\n",
		       __FUNCTION__, dev_name);
	}

	return -EINVAL;
}


int vlan_dev_get_realdev_name(const char *dev_name, char *result)
{
	struct net_device *dev = dev_get_by_name(dev_name);
	int rv = 0;
	if (dev) {
		if (dev->priv_flags & IFF_802_1Q_VLAN) {
			strncpy(result, VLAN_DEV_INFO(dev)->real_dev->name, 23);
			rv = 0;
		} else {
			rv = -EINVAL;
		}
		dev_put(dev);
	} else {
		rv = -ENODEV;
	}
	return rv;
}

int vlan_dev_get_vid(const char *dev_name, unsigned short *result)
{
	struct net_device *dev = dev_get_by_name(dev_name);
	int rv = 0;
	if (dev) {
		if (dev->priv_flags & IFF_802_1Q_VLAN) {
			*result = VLAN_DEV_INFO(dev)->vlan_id;
			rv = 0;
		} else {
			rv = -EINVAL;
		}
		dev_put(dev);
	} else {
		rv = -ENODEV;
	}
	return rv;
}


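/* Changing the vlan device's MAC away from the real device's address forces
 * the real device into promiscuous mode; otherwise frames sent to the new
 * address would never reach us.
 */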
int vlan_dev_set_mac_address(struct net_device *dev, void *addr_struct_p)
{
	struct sockaddr *addr = (struct sockaddr *)(addr_struct_p);
	int i;

	if (netif_running(dev))
		return -EBUSY;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	printk("%s: Setting MAC address to ", dev->name);
	for (i = 0; i < 6; i++)
		printk(" %2.2x", dev->dev_addr[i]);
	printk(".\n");

	if (memcmp(VLAN_DEV_INFO(dev)->real_dev->dev_addr,
		   dev->dev_addr,
		   dev->addr_len) != 0) {
		if (!(VLAN_DEV_INFO(dev)->real_dev->flags & IFF_PROMISC)) {
			int flgs = VLAN_DEV_INFO(dev)->real_dev->flags;

			/* Increment our in-use promiscuity counter */
			dev_set_promiscuity(VLAN_DEV_INFO(dev)->real_dev, 1);

			/* Make PROMISC visible to the user. */
			flgs |= IFF_PROMISC;
			printk("VLAN (%s): Setting underlying device (%s) to promiscuous mode.\n",
			       dev->name, VLAN_DEV_INFO(dev)->real_dev->name);
			dev_change_flags(VLAN_DEV_INFO(dev)->real_dev, flgs);
		}
	} else {
		printk("VLAN (%s): Underlying device (%s) has same MAC, not checking promiscuous mode.\n",
		       dev->name, VLAN_DEV_INFO(dev)->real_dev->name);
	}

	return 0;
}

static inline int vlan_dmi_equals(struct dev_mc_list *dmi1,
				  struct dev_mc_list *dmi2)
{
	return ((dmi1->dmi_addrlen == dmi2->dmi_addrlen) &&
		(memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0));
}

/** dmi is a single entry in a dev_mc_list; mc_list is an entire list, which
 *  we iterate through.  Returns nonzero when dmi should be (re-)added to the
 *  master's list, i.e. it is absent from mc_list or its use count has grown.
 */
static int vlan_should_add_mc(struct dev_mc_list *dmi, struct dev_mc_list *mc_list)
{
	struct dev_mc_list *idmi;

	for (idmi = mc_list; idmi != NULL; ) {
		if (vlan_dmi_equals(dmi, idmi)) {
			if (dmi->dmi_users > idmi->dmi_users)
				return 1;
			else
				return 0;
		} else {
			idmi = idmi->next;
		}
	}

	return 1;
}

static inline void vlan_destroy_mc_list(struct dev_mc_list *mc_list)
{
	struct dev_mc_list *dmi = mc_list;
	struct dev_mc_list *next;

	while (dmi) {
		next = dmi->next;
		kfree(dmi);
		dmi = next;
	}
}

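/* Snapshot the vlan device's current multicast list into old_mc_list so the
 * next vlan_dev_set_multicast_list() call can diff it against the new list.
 */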
static void vlan_copy_mc_list(struct dev_mc_list *mc_list, struct vlan_dev_info *vlan_info)
{
	struct dev_mc_list *dmi, *new_dmi;

	vlan_destroy_mc_list(vlan_info->old_mc_list);
	vlan_info->old_mc_list = NULL;

	for (dmi = mc_list; dmi != NULL; dmi = dmi->next) {
		new_dmi = kmalloc(sizeof(*new_dmi), GFP_ATOMIC);
		if (new_dmi == NULL) {
			printk(KERN_ERR "vlan: cannot allocate memory. "
			       "Multicast may not work properly from now on.\n");
			return;
		}

		/* Copy the whole structure, then make a new 'next' pointer */
		*new_dmi = *dmi;
		new_dmi->next = vlan_info->old_mc_list;
		vlan_info->old_mc_list = new_dmi;
	}
}

static void vlan_flush_mc_list(struct net_device *dev)
{
	struct dev_mc_list *dmi = dev->mc_list;

	while (dmi) {
		printk(KERN_DEBUG "%s: del %.2x:%.2x:%.2x:%.2x:%.2x:%.2x mcast address from vlan interface\n",
		       dev->name,
		       dmi->dmi_addr[0],
		       dmi->dmi_addr[1],
		       dmi->dmi_addr[2],
		       dmi->dmi_addr[3],
		       dmi->dmi_addr[4],
		       dmi->dmi_addr[5]);
		dev_mc_delete(dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
		dmi = dev->mc_list;
	}

	/* dev->mc_list is NULL by the time we get here. */
	vlan_destroy_mc_list(VLAN_DEV_INFO(dev)->old_mc_list);
	VLAN_DEV_INFO(dev)->old_mc_list = NULL;
}

int vlan_dev_open(struct net_device *dev)
{
	if (!(VLAN_DEV_INFO(dev)->real_dev->flags & IFF_UP))
		return -ENETDOWN;

	return 0;
}

int vlan_dev_stop(struct net_device *dev)
{
	vlan_flush_mc_list(dev);
	return 0;
}

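/* Pass a few hardware-level ioctls (MII and ethtool) straight through to the
 * underlying real device, copying the result back into the caller's ifreq.
 */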
int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct net_device *real_dev = VLAN_DEV_INFO(dev)->real_dev;
	struct ifreq ifrr;
	int err = -EOPNOTSUPP;

	strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
	ifrr.ifr_ifru = ifr->ifr_ifru;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (real_dev->do_ioctl && netif_device_present(real_dev))
			err = real_dev->do_ioctl(real_dev, &ifrr, cmd);
		break;

	case SIOCETHTOOL:
		err = dev_ethtool(&ifrr);
	}

	if (!err)
		ifr->ifr_ifru = ifrr.ifr_ifru;

	return err;
}

/** Taken from Gleb + Lennert's VLAN code, and modified... */
void vlan_dev_set_multicast_list(struct net_device *vlan_dev)
{
	struct dev_mc_list *dmi;
	struct net_device *real_dev;
	int inc;

	if (vlan_dev && (vlan_dev->priv_flags & IFF_802_1Q_VLAN)) {
		/* Then it's a real vlan device, as far as we can tell.. */
		real_dev = VLAN_DEV_INFO(vlan_dev)->real_dev;

		/* compare the current promiscuity to the last promisc we had.. */
		inc = vlan_dev->promiscuity - VLAN_DEV_INFO(vlan_dev)->old_promiscuity;
		if (inc) {
			printk(KERN_INFO "%s: dev_set_promiscuity(master, %d)\n",
			       vlan_dev->name, inc);
			dev_set_promiscuity(real_dev, inc); /* found in dev.c */
			VLAN_DEV_INFO(vlan_dev)->old_promiscuity = vlan_dev->promiscuity;
		}

		inc = vlan_dev->allmulti - VLAN_DEV_INFO(vlan_dev)->old_allmulti;
		if (inc) {
			printk(KERN_INFO "%s: dev_set_allmulti(master, %d)\n",
			       vlan_dev->name, inc);
			dev_set_allmulti(real_dev, inc); /* dev.c */
			VLAN_DEV_INFO(vlan_dev)->old_allmulti = vlan_dev->allmulti;
		}

		/* looking for addresses to add to the master's list */
		for (dmi = vlan_dev->mc_list; dmi != NULL; dmi = dmi->next) {
			if (vlan_should_add_mc(dmi, VLAN_DEV_INFO(vlan_dev)->old_mc_list)) {
				dev_mc_add(real_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
				printk(KERN_DEBUG "%s: add %.2x:%.2x:%.2x:%.2x:%.2x:%.2x mcast address to master interface\n",
				       vlan_dev->name,
				       dmi->dmi_addr[0],
				       dmi->dmi_addr[1],
				       dmi->dmi_addr[2],
				       dmi->dmi_addr[3],
				       dmi->dmi_addr[4],
				       dmi->dmi_addr[5]);
			}
		}

		/* looking for addresses to delete from the master's list */
		for (dmi = VLAN_DEV_INFO(vlan_dev)->old_mc_list; dmi != NULL; dmi = dmi->next) {
			if (vlan_should_add_mc(dmi, vlan_dev->mc_list)) {
				/* vlan_should_add_mc() is true here when the old entry is
				 * missing from (or has more users than) the vlan device's
				 * current list, so delete it from the real list on the
				 * underlying device.
				 */
				dev_mc_delete(real_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
				printk(KERN_DEBUG "%s: del %.2x:%.2x:%.2x:%.2x:%.2x:%.2x mcast address from master interface\n",
				       vlan_dev->name,
				       dmi->dmi_addr[0],
				       dmi->dmi_addr[1],
				       dmi->dmi_addr[2],
				       dmi->dmi_addr[3],
				       dmi->dmi_addr[4],
				       dmi->dmi_addr[5]);
			}
		}

		/* save the multicast list */
		vlan_copy_mc_list(vlan_dev->mc_list, VLAN_DEV_INFO(vlan_dev));
	}
}