1/*
2 * Copyright (C) 2011 B.A.T.M.A.N. contributors:
3 *
4 * Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#include "main.h"
23#include "hash.h"
24#include "hard-interface.h"
25#include "originator.h"
26#include "bridge_loop_avoidance.h"
27#include "translation-table.h"
28#include "send.h"
29
30#include <linux/etherdevice.h>
31#include <linux/crc16.h>
32#include <linux/if_arp.h>
33#include <net/arp.h>
34#include <linux/if_vlan.h>
35
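/* claim frames use claim_dest as the ARP HW destination, with the fourth
 * byte replaced by the claim type (see bla_send_claim()); announce frames
 * use announce_mac as the prefix of their ARP HW source, followed by the
 * two byte claim checksum (see bla_send_announce()).
 */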
36static const uint8_t claim_dest[6] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
37static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
38
39static void bla_periodic_work(struct work_struct *work);
40static void bla_send_announce(struct bat_priv *bat_priv,
41 struct backbone_gw *backbone_gw);
42
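/* the two hash functions below run a one-at-a-time hash over the first
 * ETH_ALEN + sizeof(short) bytes of the entry, i.e. the mac address
 * followed by the vid.
 */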
43/* return the index of the claim */
44static inline uint32_t choose_claim(const void *data, uint32_t size)
45{
46 const unsigned char *key = data;
47 uint32_t hash = 0;
48 size_t i;
49
50 for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
51 hash += key[i];
52 hash += (hash << 10);
53 hash ^= (hash >> 6);
54 }
55
56 hash += (hash << 3);
57 hash ^= (hash >> 11);
58 hash += (hash << 15);
59
60 return hash % size;
61}
62
63/* return the index of the backbone gateway */
64static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
65{
66 const unsigned char *key = data;
67 uint32_t hash = 0;
68 size_t i;
69
70 for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
71 hash += key[i];
72 hash += (hash << 10);
73 hash ^= (hash >> 6);
74 }
75
76 hash += (hash << 3);
77 hash ^= (hash >> 11);
78 hash += (hash << 15);
79
80 return hash % size;
81}
82
83
84/* compares address and vid of two backbone gws */
85static int compare_backbone_gw(const struct hlist_node *node, const void *data2)
86{
87 const void *data1 = container_of(node, struct backbone_gw,
88 hash_entry);
89
90 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
91}
92
93/* compares address and vid of two claims */
94static int compare_claim(const struct hlist_node *node, const void *data2)
95{
96 const void *data1 = container_of(node, struct claim,
97 hash_entry);
98
99 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
100}
101
102/* free a backbone gw */
103static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
104{
105 if (atomic_dec_and_test(&backbone_gw->refcount))
106 kfree_rcu(backbone_gw, rcu);
107}
108
109/* finally deinitialize the claim */
110static void claim_free_rcu(struct rcu_head *rcu)
111{
112 struct claim *claim;
113
114 claim = container_of(rcu, struct claim, rcu);
115
116 backbone_gw_free_ref(claim->backbone_gw);
117 kfree(claim);
118}
119
120/* free a claim, call claim_free_rcu if it's the last reference */
121static void claim_free_ref(struct claim *claim)
122{
123 if (atomic_dec_and_test(&claim->refcount))
124 call_rcu(&claim->rcu, claim_free_rcu);
125}
126
127/**
128 * @bat_priv: the bat priv with all the soft interface information
129 * @data: search data (may be local/static data)
130 *
131 * looks for a claim in the hash, and returns it if found
132 * or NULL otherwise.
133 */
134static struct claim *claim_hash_find(struct bat_priv *bat_priv,
135 struct claim *data)
136{
137 struct hashtable_t *hash = bat_priv->claim_hash;
138 struct hlist_head *head;
139 struct hlist_node *node;
140 struct claim *claim;
141 struct claim *claim_tmp = NULL;
142 int index;
143
144 if (!hash)
145 return NULL;
146
147 index = choose_claim(data, hash->size);
148 head = &hash->table[index];
149
150 rcu_read_lock();
151 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
152 if (!compare_claim(&claim->hash_entry, data))
153 continue;
154
155 if (!atomic_inc_not_zero(&claim->refcount))
156 continue;
157
158 claim_tmp = claim;
159 break;
160 }
161 rcu_read_unlock();
162
163 return claim_tmp;
164}
165
166/**
167 * @bat_priv: the bat priv with all the soft interface information
168 * @addr: the address of the originator
169 * @vid: the VLAN ID
170 *
171 * looks for a backbone gateway in the hash, and returns it if found
172 * or NULL otherwise.
173 */
174static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
175 uint8_t *addr, short vid)
176{
177 struct hashtable_t *hash = bat_priv->backbone_hash;
178 struct hlist_head *head;
179 struct hlist_node *node;
180 struct backbone_gw search_entry, *backbone_gw;
181 struct backbone_gw *backbone_gw_tmp = NULL;
182 int index;
183
184 if (!hash)
185 return NULL;
186
187 memcpy(search_entry.orig, addr, ETH_ALEN);
188 search_entry.vid = vid;
189
190 index = choose_backbone_gw(&search_entry, hash->size);
191 head = &hash->table[index];
192
193 rcu_read_lock();
194 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
195 if (!compare_backbone_gw(&backbone_gw->hash_entry,
196 &search_entry))
197 continue;
198
199 if (!atomic_inc_not_zero(&backbone_gw->refcount))
200 continue;
201
202 backbone_gw_tmp = backbone_gw;
203 break;
204 }
205 rcu_read_unlock();
206
207 return backbone_gw_tmp;
208}
209
210/* delete all claims for a backbone */
211static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
212{
213 struct hashtable_t *hash;
214 struct hlist_node *node, *node_tmp;
215 struct hlist_head *head;
216 struct claim *claim;
217 int i;
218 spinlock_t *list_lock; /* protects write access to the hash lists */
219
220 hash = backbone_gw->bat_priv->claim_hash;
221 if (!hash)
222 return;
223
224 for (i = 0; i < hash->size; i++) {
225 head = &hash->table[i];
226 list_lock = &hash->list_locks[i];
227
228 spin_lock_bh(list_lock);
229 hlist_for_each_entry_safe(claim, node, node_tmp,
230 head, hash_entry) {
231
232 if (claim->backbone_gw != backbone_gw)
233 continue;
234
235 claim_free_ref(claim);
236 hlist_del_rcu(node);
237 }
238 spin_unlock_bh(list_lock);
239 }
240
241 /* all claims gone, initialize CRC */
242 backbone_gw->crc = BLA_CRC_INIT;
243}
244
245/**
246 * @bat_priv: the bat priv with all the soft interface information
247 * @mac: the mac address to be announced within the claim
248 * @vid: the VLAN ID
249 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
250 *
251 * sends a claim frame according to the provided info.
252 */
253static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
254 short vid, int claimtype)
255{
256 struct sk_buff *skb;
257 struct ethhdr *ethhdr;
258 struct hard_iface *primary_if;
259 struct net_device *soft_iface;
260 uint8_t *hw_src;
261 struct bla_claim_dst local_claim_dest;
262 uint32_t zeroip = 0;
263
264 primary_if = primary_if_get_selected(bat_priv);
265 if (!primary_if)
266 return;
267
268 memcpy(&local_claim_dest, claim_dest, sizeof(local_claim_dest));
269 local_claim_dest.type = claimtype;
270
271 soft_iface = primary_if->soft_iface;
272
273 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
274 /* IP DST: 0.0.0.0 */
275 zeroip,
276 primary_if->soft_iface,
277 /* IP SRC: 0.0.0.0 */
278 zeroip,
279 /* Ethernet DST: Broadcast */
280 NULL,
281 /* Ethernet SRC/HW SRC: originator mac */
282 primary_if->net_dev->dev_addr,
283 /* HW DST: FF:43:05:XX:00:00
284 * with XX = claim type
285 */
286 (uint8_t *)&local_claim_dest);
287
288 if (!skb)
289 goto out;
290
291 ethhdr = (struct ethhdr *)skb->data;
292 hw_src = (uint8_t *)ethhdr +
293 sizeof(struct ethhdr) +
294 sizeof(struct arphdr);
295
296 /* now we pretend that the client would have sent this ... */
297 switch (claimtype) {
298 case CLAIM_TYPE_ADD:
299 /* normal claim frame
300 * set Ethernet SRC to the client's mac
301 */
302 memcpy(ethhdr->h_source, mac, ETH_ALEN);
303 bat_dbg(DBG_BLA, bat_priv,
304 "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
305 break;
306 case CLAIM_TYPE_DEL:
307 /* unclaim frame
308 * set HW SRC to the client's mac
309 */
310 memcpy(hw_src, mac, ETH_ALEN);
311 bat_dbg(DBG_BLA, bat_priv,
312 "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid);
313 break;
314 case CLAIM_TYPE_ANNOUNCE:
315 /* announcement frame
316 * set HW SRC to the special mac containing the crc
317 */
318 memcpy(hw_src, mac, ETH_ALEN);
319 bat_dbg(DBG_BLA, bat_priv,
320 "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
321 ethhdr->h_source, vid);
322 break;
323 case CLAIM_TYPE_REQUEST:
324 /* request frame
325 * set HW SRC to the special mac containing the crc
326 */
327 memcpy(hw_src, mac, ETH_ALEN);
328 memcpy(ethhdr->h_dest, mac, ETH_ALEN);
329 bat_dbg(DBG_BLA, bat_priv,
330 "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
331 ethhdr->h_source, ethhdr->h_dest, vid);
332 break;
333
334 }
335
336 if (vid != -1)
337 skb = vlan_insert_tag(skb, vid);
338
339 skb_reset_mac_header(skb);
340 skb->protocol = eth_type_trans(skb, soft_iface);
341 bat_priv->stats.rx_packets++;
342 bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr);
343 soft_iface->last_rx = jiffies;
344
345 netif_rx(skb);
346out:
347 if (primary_if)
348 hardif_free_ref(primary_if);
349}
350
351/**
352 * @bat_priv: the bat priv with all the soft interface information
353 * @orig: the mac address of the originator
354 * @vid: the VLAN ID
355 *
356 * searches for the backbone gw or creates a new one if it could not
357 * be found.
358 */
359static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
360 uint8_t *orig, short vid)
361{
362 struct backbone_gw *entry;
363 struct orig_node *orig_node;
364 int hash_added;
365
366 entry = backbone_hash_find(bat_priv, orig, vid);
367
368 if (entry)
369 return entry;
370
371 bat_dbg(DBG_BLA, bat_priv,
372 "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
373 orig, vid);
374
375 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
376 if (!entry)
377 return NULL;
378
379 entry->vid = vid;
380 entry->lasttime = jiffies;
381 entry->crc = BLA_CRC_INIT;
382 entry->bat_priv = bat_priv;
383 atomic_set(&entry->request_sent, 0);
384 memcpy(entry->orig, orig, ETH_ALEN);
385
386 /* one for the hash, one for returning */
387 atomic_set(&entry->refcount, 2);
388
389 hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw,
390 choose_backbone_gw, entry, &entry->hash_entry);
391
392 if (unlikely(hash_added != 0)) {
393 /* hash failed, free the structure */
394 kfree(entry);
395 return NULL;
396 }
397
398 /* this is a gateway now, remove any tt entries */
399 orig_node = orig_hash_find(bat_priv, orig);
400 if (orig_node) {
401 tt_global_del_orig(bat_priv, orig_node,
402 "became a backbone gateway");
403 orig_node_free_ref(orig_node);
404 }
405 return entry;
406}
407
408/* update or add our own backbone gw to make sure we announce
409 * on the VLANs where we receive other backbone gws
410 */
411static void bla_update_own_backbone_gw(struct bat_priv *bat_priv,
412 struct hard_iface *primary_if,
413 short vid)
414{
415 struct backbone_gw *backbone_gw;
416
417 backbone_gw = bla_get_backbone_gw(bat_priv,
418 primary_if->net_dev->dev_addr, vid);
419 if (unlikely(!backbone_gw))
420 return;
421
422 backbone_gw->lasttime = jiffies;
423 backbone_gw_free_ref(backbone_gw);
424}
425
426/**
427 * @bat_priv: the bat priv with all the soft interface information
428 * @vid: the vid on which the request was received
429 *
430 * Repeat all of our own claims, and finally send an ANNOUNCE frame
431 * to allow the requester another check if the CRC is correct now.
432 */
433static void bla_answer_request(struct bat_priv *bat_priv,
434 struct hard_iface *primary_if, short vid)
435{
436 struct hlist_node *node;
437 struct hlist_head *head;
438 struct hashtable_t *hash;
439 struct claim *claim;
440 struct backbone_gw *backbone_gw;
441 int i;
442
443 bat_dbg(DBG_BLA, bat_priv,
444 "bla_answer_request(): received a claim request, send all of our own claims again\n");
445
446 backbone_gw = backbone_hash_find(bat_priv,
447 primary_if->net_dev->dev_addr, vid);
448 if (!backbone_gw)
449 return;
450
451 hash = bat_priv->claim_hash;
452 for (i = 0; i < hash->size; i++) {
453 head = &hash->table[i];
454
455 rcu_read_lock();
456 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
457 /* only own claims are interesting */
458 if (claim->backbone_gw != backbone_gw)
459 continue;
460
461 bla_send_claim(bat_priv, claim->addr, claim->vid,
462 CLAIM_TYPE_ADD);
463 }
464 rcu_read_unlock();
465 }
466
467 /* finally, send an announcement frame */
468 bla_send_announce(bat_priv, backbone_gw);
469 backbone_gw_free_ref(backbone_gw);
470}
471
472/**
473 * @backbone_gw: the backbone gateway from whom we are out of sync
474 *
475 * When the crc is wrong, ask the backbone gateway for a full table update.
476 * After the request, it will repeat all of its own claims and finally
477 * send an announcement claim with which we can check again.
478 */
479static void bla_send_request(struct backbone_gw *backbone_gw)
480{
481 /* first, remove all old entries */
482 bla_del_backbone_claims(backbone_gw);
483
484 bat_dbg(DBG_BLA, backbone_gw->bat_priv,
485 "Sending REQUEST to %pM\n",
486 backbone_gw->orig);
487
488 /* send request */
489 bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
490 backbone_gw->vid, CLAIM_TYPE_REQUEST);
491
492 /* no local broadcasts should be sent or received, for now. */
493 if (!atomic_read(&backbone_gw->request_sent)) {
494 atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
495 atomic_set(&backbone_gw->request_sent, 1);
496 }
497}
498
499/**
500 * @bat_priv: the bat priv with all the soft interface information
501 * @backbone_gw: our backbone gateway which should be announced
502 *
503 * This function sends an announcement. It is called from multiple
504 * places.
505 */
506static void bla_send_announce(struct bat_priv *bat_priv,
507 struct backbone_gw *backbone_gw)
508{
509 uint8_t mac[ETH_ALEN];
510 uint16_t crc;
511
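	/* build the special announce mac: the announce_mac prefix followed by
	 * the current claim checksum in network byte order
	 */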
512 memcpy(mac, announce_mac, 4);
513 crc = htons(backbone_gw->crc);
514 memcpy(&mac[4], (uint8_t *)&crc, 2);
515
516 bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE);
517
518}
519
520/**
521 * @bat_priv: the bat priv with all the soft interface information
522 * @mac: the mac address of the claim
523 * @vid: the VLAN ID of the frame
524 * @backbone_gw: the backbone gateway which claims it
525 *
526 * Adds a claim in the claim hash.
527 */
528static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
529 const short vid, struct backbone_gw *backbone_gw)
530{
531 struct claim *claim;
532 struct claim search_claim;
533 int hash_added;
534
535 memcpy(search_claim.addr, mac, ETH_ALEN);
536 search_claim.vid = vid;
537 claim = claim_hash_find(bat_priv, &search_claim);
538
539 /* create a new claim entry if it does not exist yet. */
540 if (!claim) {
541 claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
542 if (!claim)
543 return;
544
545 memcpy(claim->addr, mac, ETH_ALEN);
546 claim->vid = vid;
547 claim->lasttime = jiffies;
548 claim->backbone_gw = backbone_gw;
549
550 atomic_set(&claim->refcount, 2);
551 bat_dbg(DBG_BLA, bat_priv,
552 "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
553 mac, vid);
554 hash_added = hash_add(bat_priv->claim_hash, compare_claim,
555 choose_claim, claim, &claim->hash_entry);
556
557 if (unlikely(hash_added != 0)) {
558 /* hash failed, free the structure */
559 kfree(claim);
560 return;
561 }
562 } else {
563 claim->lasttime = jiffies;
564 if (claim->backbone_gw == backbone_gw)
565 /* no need to register a new backbone */
566 goto claim_free_ref;
567
568 bat_dbg(DBG_BLA, bat_priv,
569 "bla_add_claim(): changing ownership for %pM, vid %d\n",
570 mac, vid);
571
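		/* the backbone gw checksum is the XOR of the crc16 of all
		 * claimed addresses, so remove this claim's crc16 from the
		 * old backbone gw before handing the claim over
		 */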
572 claim->backbone_gw->crc ^=
573 crc16(0, claim->addr, ETH_ALEN);
574 backbone_gw_free_ref(claim->backbone_gw);
575
576 }
577 /* set (new) backbone gw */
578 atomic_inc(&backbone_gw->refcount);
579 claim->backbone_gw = backbone_gw;
580
581 backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
582 backbone_gw->lasttime = jiffies;
583
584claim_free_ref:
585 claim_free_ref(claim);
586}
587
588/* Delete a claim from the claim hash which has the
589 * given mac address and vid.
590 */
591static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac,
592 const short vid)
593{
594 struct claim search_claim, *claim;
595
596 memcpy(search_claim.addr, mac, ETH_ALEN);
597 search_claim.vid = vid;
598 claim = claim_hash_find(bat_priv, &search_claim);
599 if (!claim)
600 return;
601
602 bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);
603
604 hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim);
605 claim_free_ref(claim); /* reference from the hash is gone */
606
607 claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
608
609 /* don't need the reference from hash_find() anymore */
610 claim_free_ref(claim);
611}
612
613/* check for ANNOUNCE frame, return 1 if handled */
614static int handle_announce(struct bat_priv *bat_priv,
615 uint8_t *an_addr, uint8_t *backbone_addr, short vid)
616{
617 struct backbone_gw *backbone_gw;
618 uint16_t crc;
619
620 if (memcmp(an_addr, announce_mac, 4) != 0)
621 return 0;
622
623 backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
624
625 if (unlikely(!backbone_gw))
626 return 1;
627
628
629 /* handle as ANNOUNCE frame */
630 backbone_gw->lasttime = jiffies;
631 crc = ntohs(*((uint16_t *)(&an_addr[4])));
632
633 bat_dbg(DBG_BLA, bat_priv,
634 "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
635 vid, backbone_gw->orig, crc);
636
637 if (backbone_gw->crc != crc) {
638 bat_dbg(DBG_BLA, backbone_gw->bat_priv,
639 "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
640 backbone_gw->orig, backbone_gw->vid, backbone_gw->crc,
641 crc);
642
643 bla_send_request(backbone_gw);
644 } else {
645 /* if we have sent a request and the crc was OK,
646 * we can allow traffic again.
647 */
648 if (atomic_read(&backbone_gw->request_sent)) {
649 atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
650 atomic_set(&backbone_gw->request_sent, 0);
651 }
652 }
653
654 backbone_gw_free_ref(backbone_gw);
655 return 1;
656}
657
658/* check for REQUEST frame, return 1 if handled */
659static int handle_request(struct bat_priv *bat_priv,
660 struct hard_iface *primary_if,
661 uint8_t *backbone_addr,
662 struct ethhdr *ethhdr, short vid)
663{
664 /* check for REQUEST frame */
665 if (!compare_eth(backbone_addr, ethhdr->h_dest))
666 return 0;
667
668 /* sanity check, this should not happen on a normal switch,
669 * we ignore it in this case.
670 */
671 if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
672 return 1;
673
674 bat_dbg(DBG_BLA, bat_priv,
675 "handle_request(): REQUEST vid %d (sent by %pM)...\n",
676 vid, ethhdr->h_source);
677
678 bla_answer_request(bat_priv, primary_if, vid);
679 return 1;
680}
681
682/* check for UNCLAIM frame, return 1 if handled */
683static int handle_unclaim(struct bat_priv *bat_priv,
684 struct hard_iface *primary_if,
685 uint8_t *backbone_addr,
686 uint8_t *claim_addr, short vid)
687{
688 struct backbone_gw *backbone_gw;
689
690 /* unclaim in any case if it is our own */
691 if (primary_if && compare_eth(backbone_addr,
692 primary_if->net_dev->dev_addr))
693 bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL);
694
695 backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid);
696
697 if (!backbone_gw)
698 return 1;
699
700 /* this must be an UNCLAIM frame */
701 bat_dbg(DBG_BLA, bat_priv,
702 "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
703 claim_addr, vid, backbone_gw->orig);
704
705 bla_del_claim(bat_priv, claim_addr, vid);
706 backbone_gw_free_ref(backbone_gw);
707 return 1;
708}
709
710/* check for CLAIM frame, return 1 if handled */
711static int handle_claim(struct bat_priv *bat_priv,
712 struct hard_iface *primary_if, uint8_t *backbone_addr,
713 uint8_t *claim_addr, short vid)
714{
715 struct backbone_gw *backbone_gw;
716
717 /* register the gateway if not yet available, and add the claim. */
718
719 backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
720
721 if (unlikely(!backbone_gw))
722 return 1;
723
724 /* this must be a CLAIM frame */
725 bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
726 if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
727 bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD);
728
729 /* TODO: we could call something like tt_local_del() here. */
730
731 backbone_gw_free_ref(backbone_gw);
732 return 1;
733}
734
735/**
736 * @bat_priv: the bat priv with all the soft interface information
737 * @skb: the frame to be checked
738 *
739 * Check if this is a claim frame, and process it accordingly.
740 *
741 * returns 1 if it was a claim frame, otherwise return 0 to
742 * tell the callee that it can use the frame on its own.
743 */
744static int bla_process_claim(struct bat_priv *bat_priv,
745 struct hard_iface *primary_if,
746 struct sk_buff *skb)
747{
748 struct ethhdr *ethhdr;
749 struct vlan_ethhdr *vhdr;
750 struct arphdr *arphdr;
751 uint8_t *hw_src, *hw_dst;
752 struct bla_claim_dst *bla_dst;
753 uint16_t proto;
754 int headlen;
755 short vid = -1;
756
757 ethhdr = (struct ethhdr *)skb_mac_header(skb);
758
759 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
760 vhdr = (struct vlan_ethhdr *)ethhdr;
761 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
762 proto = ntohs(vhdr->h_vlan_encapsulated_proto);
763 headlen = sizeof(*vhdr);
764 } else {
765 proto = ntohs(ethhdr->h_proto);
766 headlen = sizeof(*ethhdr);
767 }
768
769 if (proto != ETH_P_ARP)
770 return 0; /* not a claim frame */
771
772 /* this must be an ARP frame. check if it is a claim. */
773
774 if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
775 return 0;
776
777 /* pskb_may_pull() may have modified the pointers, get ethhdr again */
778 ethhdr = (struct ethhdr *)skb_mac_header(skb);
779 arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);
780
781 /* Check whether the ARP frame carries valid
782 * IP information
783 */
784
785 if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
786 return 0;
787 if (arphdr->ar_pro != htons(ETH_P_IP))
788 return 0;
789 if (arphdr->ar_hln != ETH_ALEN)
790 return 0;
791 if (arphdr->ar_pln != 4)
792 return 0;
793
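	/* the sender and target hardware addresses follow the ARP header:
	 * hw_src, 4 byte sender IP, hw_dst, 4 byte target IP
	 */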
794 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
795 hw_dst = hw_src + ETH_ALEN + 4;
796 bla_dst = (struct bla_claim_dst *)hw_dst;
797
798 /* check if it is a claim frame. */
799 if (memcmp(hw_dst, claim_dest, 3) != 0)
800 return 0;
801
802 /* become a backbone gw ourselves on this vlan if it has not happened yet */
803 bla_update_own_backbone_gw(bat_priv, primary_if, vid);
804
805 /* check for the different types of claim frames ... */
806 switch (bla_dst->type) {
807 case CLAIM_TYPE_ADD:
808 if (handle_claim(bat_priv, primary_if, hw_src,
809 ethhdr->h_source, vid))
810 return 1;
811 break;
812 case CLAIM_TYPE_DEL:
813 if (handle_unclaim(bat_priv, primary_if,
814 ethhdr->h_source, hw_src, vid))
815 return 1;
816 break;
817
818 case CLAIM_TYPE_ANNOUNCE:
819 if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid))
820 return 1;
821 break;
822 case CLAIM_TYPE_REQUEST:
823 if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid))
824 return 1;
825 break;
826 }
827
828 bat_dbg(DBG_BLA, bat_priv,
829 "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
830 ethhdr->h_source, vid, hw_src, hw_dst);
831 return 1;
832}
833
834/* Check when we last heard from other nodes, and remove them in case of
835 * a time out, or clean all backbone gws if now is set.
836 */
837static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
838{
839 struct backbone_gw *backbone_gw;
840 struct hlist_node *node, *node_tmp;
841 struct hlist_head *head;
842 struct hashtable_t *hash;
843 spinlock_t *list_lock; /* protects write access to the hash lists */
844 int i;
845
846 hash = bat_priv->backbone_hash;
847 if (!hash)
848 return;
849
850 for (i = 0; i < hash->size; i++) {
851 head = &hash->table[i];
852 list_lock = &hash->list_locks[i];
853
854 spin_lock_bh(list_lock);
855 hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
856 head, hash_entry) {
857 if (now)
858 goto purge_now;
859 if (!has_timed_out(backbone_gw->lasttime,
860 BLA_BACKBONE_TIMEOUT))
861 continue;
862
863 bat_dbg(DBG_BLA, backbone_gw->bat_priv,
864 "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
865 backbone_gw->orig);
866
867purge_now:
868 /* don't wait for the pending request anymore */
869 if (atomic_read(&backbone_gw->request_sent))
870 atomic_dec(&bat_priv->bla_num_requests);
871
872 bla_del_backbone_claims(backbone_gw);
873
874 hlist_del_rcu(node);
875 backbone_gw_free_ref(backbone_gw);
876 }
877 spin_unlock_bh(list_lock);
878 }
879}
880
881/**
882 * @bat_priv: the bat priv with all the soft interface information
883 * @primary_if: the selected primary interface, may be NULL if now is set
884 * @now: whether the whole hash shall be wiped now
885 *
886 * Check when we last heard from our own claims, and remove them in case of
887 * a time out, or clean all claims if now is set
888 */
889static void bla_purge_claims(struct bat_priv *bat_priv,
890 struct hard_iface *primary_if, int now)
891{
892 struct claim *claim;
893 struct hlist_node *node;
894 struct hlist_head *head;
895 struct hashtable_t *hash;
896 int i;
897
898 hash = bat_priv->claim_hash;
899 if (!hash)
900 return;
901
902 for (i = 0; i < hash->size; i++) {
903 head = &hash->table[i];
904
905 rcu_read_lock();
906 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
907 if (now)
908 goto purge_now;
909 if (!compare_eth(claim->backbone_gw->orig,
910 primary_if->net_dev->dev_addr))
911 continue;
912 if (!has_timed_out(claim->lasttime,
913 BLA_CLAIM_TIMEOUT))
914 continue;
915
916 bat_dbg(DBG_BLA, bat_priv,
917 "bla_purge_claims(): %pM, vid %d, time out\n",
918 claim->addr, claim->vid);
919
920purge_now:
921 handle_unclaim(bat_priv, primary_if,
922 claim->backbone_gw->orig,
923 claim->addr, claim->vid);
924 }
925 rcu_read_unlock();
926 }
927}
928
929/**
930 * @bat_priv: the bat priv with all the soft interface information
931 * @primary_if: the new selected primary_if
932 * @oldif: the old primary interface, may be NULL
933 *
934 * Update the backbone gateways when the own orig address changes.
935 *
936 */
937void bla_update_orig_address(struct bat_priv *bat_priv,
938 struct hard_iface *primary_if,
939 struct hard_iface *oldif)
940{
941 struct backbone_gw *backbone_gw;
942 struct hlist_node *node;
943 struct hlist_head *head;
944 struct hashtable_t *hash;
945 int i;
946
947 if (!oldif) {
948 bla_purge_claims(bat_priv, NULL, 1);
949 bla_purge_backbone_gw(bat_priv, 1);
950 return;
951 }
952
953 hash = bat_priv->backbone_hash;
954 if (!hash)
955 return;
956
957 for (i = 0; i < hash->size; i++) {
958 head = &hash->table[i];
959
960 rcu_read_lock();
961 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
962 /* own orig still holds the old value. */
963 if (!compare_eth(backbone_gw->orig,
964 oldif->net_dev->dev_addr))
965 continue;
966
967 memcpy(backbone_gw->orig,
968 primary_if->net_dev->dev_addr, ETH_ALEN);
969 /* send an announce frame so others will ask for our
970 * claims and update their tables.
971 */
972 bla_send_announce(bat_priv, backbone_gw);
973 }
974 rcu_read_unlock();
975 }
976}
977
978
979
980/* (re)start the timer */
981static void bla_start_timer(struct bat_priv *bat_priv)
982{
983 INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
984 queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
985 msecs_to_jiffies(BLA_PERIOD_LENGTH));
986}
987
988/* periodic work to do:
989 * * purge structures when they are too old
990 * * send announcements
991 */
992static void bla_periodic_work(struct work_struct *work)
993{
994 struct delayed_work *delayed_work =
995 container_of(work, struct delayed_work, work);
996 struct bat_priv *bat_priv =
997 container_of(delayed_work, struct bat_priv, bla_work);
998 struct hlist_node *node;
999 struct hlist_head *head;
1000 struct backbone_gw *backbone_gw;
1001 struct hashtable_t *hash;
1002 struct hard_iface *primary_if;
1003 int i;
1004
1005 primary_if = primary_if_get_selected(bat_priv);
1006 if (!primary_if)
1007 goto out;
1008
1009 bla_purge_claims(bat_priv, primary_if, 0);
1010 bla_purge_backbone_gw(bat_priv, 0);
1011
1012 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1013 goto out;
1014
1015 hash = bat_priv->backbone_hash;
1016 if (!hash)
1017 goto out;
1018
1019 for (i = 0; i < hash->size; i++) {
1020 head = &hash->table[i];
1021
1022 rcu_read_lock();
1023 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
1024 if (!compare_eth(backbone_gw->orig,
1025 primary_if->net_dev->dev_addr))
1026 continue;
1027
1028 backbone_gw->lasttime = jiffies;
1029
1030 bla_send_announce(bat_priv, backbone_gw);
1031 }
1032 rcu_read_unlock();
1033 }
1034out:
1035 if (primary_if)
1036 hardif_free_ref(primary_if);
1037
1038 bla_start_timer(bat_priv);
1039}
1040
1041/* initialize all bla structures */
1042int bla_init(struct bat_priv *bat_priv)
1043{
1044 bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");
1045
1046 if (bat_priv->claim_hash)
1047 return 1;
1048
1049 bat_priv->claim_hash = hash_new(128);
1050 bat_priv->backbone_hash = hash_new(32);
1051
1052 if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
1053 return -1;
1054
1055 bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");
1056
1057 bla_start_timer(bat_priv);
1058 return 1;
1059}
1060
1061/**
1062 * @bat_priv: the bat priv with all the soft interface information
1063 * @orig: originator mac address
1064 *
1065 * check if the originator is a gateway for any VLAN ID.
1066 *
1067 * returns 1 if it is found, 0 otherwise
1068 *
1069 */
1070
1071int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
1072{
1073 struct hashtable_t *hash = bat_priv->backbone_hash;
1074 struct hlist_head *head;
1075 struct hlist_node *node;
1076 struct backbone_gw *backbone_gw;
1077 int i;
1078
1079 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1080 return 0;
1081
1082 if (!hash)
1083 return 0;
1084
1085 for (i = 0; i < hash->size; i++) {
1086 head = &hash->table[i];
1087
1088 rcu_read_lock();
1089 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
1090 if (compare_eth(backbone_gw->orig, orig)) {
1091 rcu_read_unlock();
1092 return 1;
1093 }
1094 }
1095 rcu_read_unlock();
1096 }
1097
1098 return 0;
1099}
1100
1101
1102/**
1103 * @skb: the frame to be checked
1104 * @orig_node: the orig_node of the frame
1105 * @hdr_size: maximum length of the frame
1106 *
1107 * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
1108 * if the orig_node is also a gateway on the soft interface, otherwise it
1109 * returns 0.
1110 *
1111 */
1112int bla_is_backbone_gw(struct sk_buff *skb,
1113 struct orig_node *orig_node, int hdr_size)
1114{
1115 struct ethhdr *ethhdr;
1116 struct vlan_ethhdr *vhdr;
1117 struct backbone_gw *backbone_gw;
1118 short vid = -1;
1119
1120 if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
1121 return 0;
1122
1123 /* first, find out the vid. */
1124 if (!pskb_may_pull(skb, hdr_size + sizeof(struct ethhdr)))
1125 return 0;
1126
1127 ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);
1128
1129 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
1130 if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
1131 return 0;
1132
1133 vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
1134 hdr_size);
1135 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
1136 }
1137
1138 /* see if this originator is a backbone gw for this VLAN */
1139
1140 backbone_gw = backbone_hash_find(orig_node->bat_priv,
1141 orig_node->orig, vid);
1142 if (!backbone_gw)
1143 return 0;
1144
1145 backbone_gw_free_ref(backbone_gw);
1146 return 1;
1147}
1148
1149/* free all bla structures (for softinterface free or module unload) */
1150void bla_free(struct bat_priv *bat_priv)
1151{
1152 struct hard_iface *primary_if;
1153
1154 cancel_delayed_work_sync(&bat_priv->bla_work);
1155 primary_if = primary_if_get_selected(bat_priv);
1156
1157 if (bat_priv->claim_hash) {
1158 bla_purge_claims(bat_priv, primary_if, 1);
1159 hash_destroy(bat_priv->claim_hash);
1160 bat_priv->claim_hash = NULL;
1161 }
1162 if (bat_priv->backbone_hash) {
1163 bla_purge_backbone_gw(bat_priv, 1);
1164 hash_destroy(bat_priv->backbone_hash);
1165 bat_priv->backbone_hash = NULL;
1166 }
1167 if (primary_if)
1168 hardif_free_ref(primary_if);
1169}
1170
1171/**
1172 * @bat_priv: the bat priv with all the soft interface information
1173 * @skb: the frame to be checked
1174 * @vid: the VLAN ID of the frame
1175 *
1176 * bla_rx avoidance checks whether:
1177 * * we have to race for a claim
1178 * * the frame is allowed on the LAN
1179 *
1180 * in these cases, the skb is further handled by this function and
1181 * returns 1, otherwise it returns 0 and the caller shall further
1182 * process the skb.
1183 *
1184 */
1185int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
1186{
1187 struct ethhdr *ethhdr;
1188 struct claim search_claim, *claim = NULL;
1189 struct hard_iface *primary_if;
1190 int ret;
1191
1192 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1193
1194 primary_if = primary_if_get_selected(bat_priv);
1195 if (!primary_if)
1196 goto handled;
1197
1198 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1199 goto allow;
1200
1201
1202 if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
1203 /* don't allow broadcasts while requests are in flight */
1204 if (is_multicast_ether_addr(ethhdr->h_dest))
1205 goto handled;
1206
1207 memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
1208 search_claim.vid = vid;
1209 claim = claim_hash_find(bat_priv, &search_claim);
1210
1211 if (!claim) {
1212 /* possible optimization: race for a claim */
1213 /* No claim exists yet, claim it for us!
1214 */
1215 handle_claim(bat_priv, primary_if,
1216 primary_if->net_dev->dev_addr,
1217 ethhdr->h_source, vid);
1218 goto allow;
1219 }
1220
1221 /* if it is our own claim ... */
1222 if (compare_eth(claim->backbone_gw->orig,
1223 primary_if->net_dev->dev_addr)) {
1224 /* ... allow it in any case */
1225 claim->lasttime = jiffies;
1226 goto allow;
1227 }
1228
1229 /* if it is a broadcast ... */
1230 if (is_multicast_ether_addr(ethhdr->h_dest)) {
1231 /* ... drop it. the responsible gateway is in charge. */
1232 goto handled;
1233 } else {
1234 /* seems the client considers us as its best gateway.
1235 * send a claim and update the claim table
1236 * immediately.
1237 */
1238 handle_claim(bat_priv, primary_if,
1239 primary_if->net_dev->dev_addr,
1240 ethhdr->h_source, vid);
1241 goto allow;
1242 }
1243allow:
1244 bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1245 ret = 0;
1246 goto out;
1247
1248handled:
1249 kfree_skb(skb);
1250 ret = 1;
1251
1252out:
1253 if (primary_if)
1254 hardif_free_ref(primary_if);
1255 if (claim)
1256 claim_free_ref(claim);
1257 return ret;
1258}
1259
1260/**
1261 * @bat_priv: the bat priv with all the soft interface information
1262 * @skb: the frame to be checked
1263 * @vid: the VLAN ID of the frame
1264 *
1265 * bla_tx checks whether:
1266 * * a claim was received which has to be processed
1267 * * the frame is allowed on the mesh
1268 *
1269 * in these cases, the skb is further handled by this function and
1270 * returns 1, otherwise it returns 0 and the caller shall further
1271 * process the skb.
1272 *
1273 */
1274int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
1275{
1276 struct ethhdr *ethhdr;
1277 struct claim search_claim, *claim = NULL;
1278 struct hard_iface *primary_if;
1279 int ret = 0;
1280
1281 primary_if = primary_if_get_selected(bat_priv);
1282 if (!primary_if)
1283 goto out;
1284
1285 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1286 goto allow;
1287
1288 /* in VLAN case, the mac header might not be set. */
1289 skb_reset_mac_header(skb);
1290
1291 if (bla_process_claim(bat_priv, primary_if, skb))
1292 goto handled;
1293
1294 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1295
1296 if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
1297 /* don't allow broadcasts while requests are in flight */
1298 if (is_multicast_ether_addr(ethhdr->h_dest))
1299 goto handled;
1300
1301 memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
1302 search_claim.vid = vid;
1303
1304 claim = claim_hash_find(bat_priv, &search_claim);
1305
1306 /* if no claim exists, allow it. */
1307 if (!claim)
1308 goto allow;
1309
1310 /* check if we are responsible. */
1311 if (compare_eth(claim->backbone_gw->orig,
1312 primary_if->net_dev->dev_addr)) {
1313 /* if yes, the client has roamed and we have
1314 * to unclaim it.
1315 */
1316 handle_unclaim(bat_priv, primary_if,
1317 primary_if->net_dev->dev_addr,
1318 ethhdr->h_source, vid);
1319 goto allow;
1320 }
1321
1322 /* check if it is a multicast/broadcast frame */
1323 if (is_multicast_ether_addr(ethhdr->h_dest)) {
1324 /* drop it. the responsible gateway has forwarded it into
1325 * the backbone network.
1326 */
1327 goto handled;
1328 } else {
1329 /* we must allow it. at least if we are
1330 * responsible for the DESTINATION.
1331 */
1332 goto allow;
1333 }
1334allow:
1335 bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1336 ret = 0;
1337 goto out;
1338handled:
1339 ret = 1;
1340out:
1341 if (primary_if)
1342 hardif_free_ref(primary_if);
1343 if (claim)
1344 claim_free_ref(claim);
1345 return ret;
1346}
1347
1348int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1349{
1350 struct net_device *net_dev = (struct net_device *)seq->private;
1351 struct bat_priv *bat_priv = netdev_priv(net_dev);
1352 struct hashtable_t *hash = bat_priv->claim_hash;
1353 struct claim *claim;
1354 struct hard_iface *primary_if;
1355 struct hlist_node *node;
1356 struct hlist_head *head;
1357 uint32_t i;
1358 bool is_own;
1359 int ret = 0;
1360
1361 primary_if = primary_if_get_selected(bat_priv);
1362 if (!primary_if) {
1363 ret = seq_printf(seq,
1364 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
1365 net_dev->name);
1366 goto out;
1367 }
1368
1369 if (primary_if->if_status != IF_ACTIVE) {
1370 ret = seq_printf(seq,
1371 "BATMAN mesh %s disabled - primary interface not active\n",
1372 net_dev->name);
1373 goto out;
1374 }
1375
1376 seq_printf(seq, "Claims announced for the mesh %s (orig %pM)\n",
1377 net_dev->name, primary_if->net_dev->dev_addr);
1378 seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n",
1379 "Client", "VID", "Originator", "CRC");
1380 for (i = 0; i < hash->size; i++) {
1381 head = &hash->table[i];
1382
1383 rcu_read_lock();
1384 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
1385 is_own = compare_eth(claim->backbone_gw->orig,
1386 primary_if->net_dev->dev_addr);
1387 seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n",
1388 claim->addr, claim->vid,
1389 claim->backbone_gw->orig,
1390 (is_own ? 'x' : ' '),
1391 claim->backbone_gw->crc);
1392 }
1393 rcu_read_unlock();
1394 }
1395out:
1396 if (primary_if)
1397 hardif_free_ref(primary_if);
1398 return ret;
1399}