/*
 * Copyright (C) 2011 B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "hash.h"
#include "hard-interface.h"
#include "originator.h"
#include "bridge_loop_avoidance.h"
#include "translation-table.h"
#include "send.h"

#include <linux/etherdevice.h>
#include <linux/crc16.h>
#include <linux/if_arp.h>
#include <net/arp.h>
#include <linux/if_vlan.h>

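/* special destination mac of claim frames: FF:43:05:XX:00:00, where the
 * fourth byte (XX) carries the claim type (see bla_send_claim()). The first
 * four bytes of announce_mac form the HW SRC of ANNOUNCE frames; the last
 * two bytes of that HW SRC carry the claim checksum (see bla_send_announce()).
 */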
static const uint8_t claim_dest[6] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05};

static void bla_periodic_work(struct work_struct *work);
static void bla_send_announce(struct bat_priv *bat_priv,
			      struct backbone_gw *backbone_gw);

/* return the index of the claim */
static inline uint32_t choose_claim(const void *data, uint32_t size)
{
	const unsigned char *key = data;
	uint32_t hash = 0;
	size_t i;

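	/* jenkins one-at-a-time hash over the claim's mac address and VLAN ID
	 * (the first ETH_ALEN + sizeof(short) bytes of struct claim)
	 */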
	for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
		hash += key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}

/* return the index of the backbone gateway */
static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
{
	const unsigned char *key = data;
	uint32_t hash = 0;
	size_t i;

	for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
		hash += key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}

/* compares address and vid of two backbone gws */
static int compare_backbone_gw(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct backbone_gw,
					 hash_entry);

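	/* the originator address and vid are the first members of
	 * struct backbone_gw, so comparing the leading
	 * ETH_ALEN + sizeof(short) bytes is sufficient
	 */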
	return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
}

/* compares address and vid of two claims */
static int compare_claim(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct claim,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
}

/* free a backbone gw */
static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
{
	if (atomic_dec_and_test(&backbone_gw->refcount))
		kfree_rcu(backbone_gw, rcu);
}

/* finally deinitialize the claim */
static void claim_free_rcu(struct rcu_head *rcu)
{
	struct claim *claim;

	claim = container_of(rcu, struct claim, rcu);

	backbone_gw_free_ref(claim->backbone_gw);
	kfree(claim);
}

/* free a claim, call claim_free_rcu if it's the last reference */
static void claim_free_ref(struct claim *claim)
{
	if (atomic_dec_and_test(&claim->refcount))
		call_rcu(&claim->rcu, claim_free_rcu);
}

/**
 * @bat_priv: the bat priv with all the soft interface information
 * @data: search data (may be local/static data)
 *
 * looks for a claim in the hash, and returns it if found
 * or NULL otherwise.
 */
static struct claim *claim_hash_find(struct bat_priv *bat_priv,
				     struct claim *data)
{
	struct hashtable_t *hash = bat_priv->claim_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct claim *claim;
	struct claim *claim_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = choose_claim(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
		if (!compare_claim(&claim->hash_entry, data))
			continue;

		if (!atomic_inc_not_zero(&claim->refcount))
			continue;

		claim_tmp = claim;
		break;
	}
	rcu_read_unlock();

	return claim_tmp;
}

/**
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
 * looks for a backbone gateway in the hash, and returns it if found
 * or NULL otherwise.
 */
static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
					      uint8_t *addr, short vid)
{
	struct hashtable_t *hash = bat_priv->backbone_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct backbone_gw search_entry, *backbone_gw;
	struct backbone_gw *backbone_gw_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	memcpy(search_entry.orig, addr, ETH_ALEN);
	search_entry.vid = vid;

	index = choose_backbone_gw(&search_entry, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
		if (!compare_backbone_gw(&backbone_gw->hash_entry,
					 &search_entry))
			continue;

		if (!atomic_inc_not_zero(&backbone_gw->refcount))
			continue;

		backbone_gw_tmp = backbone_gw;
		break;
	}
	rcu_read_unlock();

	return backbone_gw_tmp;
}

/* delete all claims for a backbone */
static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
{
	struct hashtable_t *hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	struct claim *claim;
	int i;
	spinlock_t *list_lock;	/* protects write access to the hash lists */

	hash = backbone_gw->bat_priv->claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(claim, node, node_tmp,
					  head, hash_entry) {

			if (claim->backbone_gw != backbone_gw)
				continue;

			claim_free_ref(claim);
			hlist_del_rcu(node);
		}
		spin_unlock_bh(list_lock);
	}

	/* all claims gone, initialize CRC */
	backbone_gw->crc = BLA_CRC_INIT;
}

/**
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address to be announced within the claim
 * @vid: the VLAN ID
 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
 *
 * sends a claim frame according to the provided info.
 */
static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
			   short vid, int claimtype)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	struct hard_iface *primary_if;
	struct net_device *soft_iface;
	uint8_t *hw_src;
	struct bla_claim_dst local_claim_dest;
	uint32_t zeroip = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	memcpy(&local_claim_dest, claim_dest, sizeof(local_claim_dest));
	local_claim_dest.type = claimtype;

	soft_iface = primary_if->soft_iface;

	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
			 /* IP DST: 0.0.0.0 */
			 zeroip,
			 primary_if->soft_iface,
			 /* IP SRC: 0.0.0.0 */
			 zeroip,
			 /* Ethernet DST: Broadcast */
			 NULL,
			 /* Ethernet SRC/HW SRC: originator mac */
			 primary_if->net_dev->dev_addr,
			 /* HW DST: FF:43:05:XX:00:00
			  * with XX = claim type
			  */
			 (uint8_t *)&local_claim_dest);

	if (!skb)
		goto out;

	ethhdr = (struct ethhdr *)skb->data;
	hw_src = (uint8_t *)ethhdr +
		 sizeof(struct ethhdr) +
		 sizeof(struct arphdr);

	/* now we pretend that the client would have sent this ... */
	switch (claimtype) {
	case CLAIM_TYPE_ADD:
		/* normal claim frame
		 * set Ethernet SRC to the client's mac
		 */
		memcpy(ethhdr->h_source, mac, ETH_ALEN);
		bat_dbg(DBG_BLA, bat_priv,
			"bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
		break;
	case CLAIM_TYPE_DEL:
		/* unclaim frame
		 * set HW SRC to the client's mac
		 */
		memcpy(hw_src, mac, ETH_ALEN);
		bat_dbg(DBG_BLA, bat_priv,
			"bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid);
		break;
	case CLAIM_TYPE_ANNOUNCE:
		/* announcement frame
		 * set HW SRC to the special mac containing the crc
		 */
		memcpy(hw_src, mac, ETH_ALEN);
		bat_dbg(DBG_BLA, bat_priv,
			"bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
			ethhdr->h_source, vid);
		break;
	case CLAIM_TYPE_REQUEST:
		/* request frame
		 * set HW SRC and header destination to the receiving backbone
		 * gw's mac
		 */
		memcpy(hw_src, mac, ETH_ALEN);
		memcpy(ethhdr->h_dest, mac, ETH_ALEN);
		bat_dbg(DBG_BLA, bat_priv,
			"bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
			ethhdr->h_source, ethhdr->h_dest, vid);
		break;
	}

	if (vid != -1)
		skb = vlan_insert_tag(skb, vid);

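	/* hand the claim frame to the local stack as if it had been received
	 * on the soft interface; a bridge on top of it is then expected to
	 * flood the frame onto the LAN, where other backbone gateways pick it
	 * up on their transmit path (see bla_tx()/bla_process_claim())
	 */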
	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, soft_iface);
	bat_priv->stats.rx_packets++;
	bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr);
	soft_iface->last_rx = jiffies;

	netif_rx(skb);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
}

/**
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the mac address of the originator
 * @vid: the VLAN ID
 *
 * searches for the backbone gw or creates a new one if it could not
 * be found.
 */
static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
					       uint8_t *orig, short vid)
{
	struct backbone_gw *entry;
	struct orig_node *orig_node;
	int hash_added;

	entry = backbone_hash_find(bat_priv, orig, vid);

	if (entry)
		return entry;

	bat_dbg(DBG_BLA, bat_priv,
		"bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
		orig, vid);

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->vid = vid;
	entry->lasttime = jiffies;
	entry->crc = BLA_CRC_INIT;
	entry->bat_priv = bat_priv;
	atomic_set(&entry->request_sent, 0);
	memcpy(entry->orig, orig, ETH_ALEN);

	/* one for the hash, one for returning */
	atomic_set(&entry->refcount, 2);

	hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw,
			      choose_backbone_gw, entry, &entry->hash_entry);

	if (unlikely(hash_added != 0)) {
		/* hash failed, free the structure */
		kfree(entry);
		return NULL;
	}

	/* this is a gateway now, remove any tt entries */
	orig_node = orig_hash_find(bat_priv, orig);
	if (orig_node) {
		tt_global_del_orig(bat_priv, orig_node,
				   "became a backbone gateway");
		orig_node_free_ref(orig_node);
	}
	return entry;
}

/* update or add the own backbone gw to make sure we announce
 * where we receive other backbone gws
 */
static void bla_update_own_backbone_gw(struct bat_priv *bat_priv,
				       struct hard_iface *primary_if,
				       short vid)
{
	struct backbone_gw *backbone_gw;

	backbone_gw = bla_get_backbone_gw(bat_priv,
					  primary_if->net_dev->dev_addr, vid);
	if (unlikely(!backbone_gw))
		return;

	backbone_gw->lasttime = jiffies;
	backbone_gw_free_ref(backbone_gw);
}

/**
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the vid the request was received on
 *
 * Repeat all of our own claims, and finally send an ANNOUNCE frame
 * so the requester can check again whether the CRC is correct now.
 */
static void bla_answer_request(struct bat_priv *bat_priv,
			       struct hard_iface *primary_if, short vid)
{
	struct hlist_node *node;
	struct hlist_head *head;
	struct hashtable_t *hash;
	struct claim *claim;
	struct backbone_gw *backbone_gw;
	int i;

	bat_dbg(DBG_BLA, bat_priv,
		"bla_answer_request(): received a claim request, send all of our own claims again\n");

	backbone_gw = backbone_hash_find(bat_priv,
					 primary_if->net_dev->dev_addr, vid);
	if (!backbone_gw)
		return;

	hash = bat_priv->claim_hash;
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
			/* only own claims are interesting */
			if (claim->backbone_gw != backbone_gw)
				continue;

			bla_send_claim(bat_priv, claim->addr, claim->vid,
				       CLAIM_TYPE_ADD);
		}
		rcu_read_unlock();
	}

	/* finally, send an announcement frame */
	bla_send_announce(bat_priv, backbone_gw);
	backbone_gw_free_ref(backbone_gw);
}

/**
 * @backbone_gw: the backbone gateway from whom we are out of sync
 *
 * When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of its own claims and finally
 * send an announcement claim with which we can check again.
 */
static void bla_send_request(struct backbone_gw *backbone_gw)
{
	/* first, remove all old entries */
	bla_del_backbone_claims(backbone_gw);

	bat_dbg(DBG_BLA, backbone_gw->bat_priv,
		"Sending REQUEST to %pM\n",
		backbone_gw->orig);

	/* send request */
	bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
		       backbone_gw->vid, CLAIM_TYPE_REQUEST);

	/* no local broadcasts should be sent or received, for now. */
	if (!atomic_read(&backbone_gw->request_sent)) {
		atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
		atomic_set(&backbone_gw->request_sent, 1);
	}
}

/**
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: our backbone gateway which should be announced
 *
 * This function sends an announcement. It is called from multiple
 * places.
 */
static void bla_send_announce(struct bat_priv *bat_priv,
			      struct backbone_gw *backbone_gw)
{
	uint8_t mac[ETH_ALEN];
	uint16_t crc;
	memcpy(mac, announce_mac, 4);
	crc = htons(backbone_gw->crc);
	memcpy(&mac[4], (uint8_t *)&crc, 2);

	bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE);
}

/**
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address of the claim
 * @vid: the VLAN ID of the frame
 * @backbone_gw: the backbone gateway which claims it
 *
 * Adds a claim in the claim hash.
 */
static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
			  const short vid, struct backbone_gw *backbone_gw)
{
	struct claim *claim;
	struct claim search_claim;
	int hash_added;

	memcpy(search_claim.addr, mac, ETH_ALEN);
	search_claim.vid = vid;
	claim = claim_hash_find(bat_priv, &search_claim);

	/* create a new claim entry if it does not exist yet. */
	if (!claim) {
		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
		if (!claim)
			return;

		memcpy(claim->addr, mac, ETH_ALEN);
		claim->vid = vid;
		claim->lasttime = jiffies;
		claim->backbone_gw = backbone_gw;

		atomic_set(&claim->refcount, 2);
		bat_dbg(DBG_BLA, bat_priv,
			"bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
			mac, vid);
		hash_added = hash_add(bat_priv->claim_hash, compare_claim,
				      choose_claim, claim, &claim->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* only local changes happened. */
			kfree(claim);
			return;
		}
	} else {
		claim->lasttime = jiffies;
		if (claim->backbone_gw == backbone_gw)
			/* no need to register a new backbone */
			goto claim_free_ref;

		bat_dbg(DBG_BLA, bat_priv,
			"bla_add_claim(): changing ownership for %pM, vid %d\n",
			mac, vid);

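		/* the claim checksum of a backbone gw is the XOR of crc16()
		 * over all of its claimed client addresses; remove this
		 * claim's contribution from the old gateway before the new
		 * gateway takes over below
		 */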
		claim->backbone_gw->crc ^=
			crc16(0, claim->addr, ETH_ALEN);
		backbone_gw_free_ref(claim->backbone_gw);

	}
	/* set (new) backbone gw */
	atomic_inc(&backbone_gw->refcount);
	claim->backbone_gw = backbone_gw;

	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	backbone_gw->lasttime = jiffies;

claim_free_ref:
	claim_free_ref(claim);
}

/* Delete a claim from the claim hash which has the
 * given mac address and vid.
 */
static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac,
			  const short vid)
{
	struct claim search_claim, *claim;

	memcpy(search_claim.addr, mac, ETH_ALEN);
	search_claim.vid = vid;
	claim = claim_hash_find(bat_priv, &search_claim);
	if (!claim)
		return;

	bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);

	hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim);
	claim_free_ref(claim); /* reference from the hash is gone */

	claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);

	/* don't need the reference from hash_find() anymore */
	claim_free_ref(claim);
}

/* check for ANNOUNCE frame, return 1 if handled */
static int handle_announce(struct bat_priv *bat_priv,
			   uint8_t *an_addr, uint8_t *backbone_addr, short vid)
{
	struct backbone_gw *backbone_gw;
	uint16_t crc;

	if (memcmp(an_addr, announce_mac, 4) != 0)
		return 0;

	backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);

	if (unlikely(!backbone_gw))
		return 1;

	/* handle as ANNOUNCE frame */
	backbone_gw->lasttime = jiffies;
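	/* the checksum is carried in the last two bytes of the special
	 * announce mac (see bla_send_announce())
	 */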
	crc = ntohs(*((uint16_t *)(&an_addr[4])));

	bat_dbg(DBG_BLA, bat_priv,
		"handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
		vid, backbone_gw->orig, crc);

	if (backbone_gw->crc != crc) {
		bat_dbg(DBG_BLA, backbone_gw->bat_priv,
			"handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
			backbone_gw->orig, backbone_gw->vid, backbone_gw->crc,
			crc);

		bla_send_request(backbone_gw);
	} else {
		/* if we have sent a request and the crc was OK,
		 * we can allow traffic again.
		 */
		if (atomic_read(&backbone_gw->request_sent)) {
			atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
	}

	backbone_gw_free_ref(backbone_gw);
	return 1;
}

/* check for REQUEST frame, return 1 if handled */
static int handle_request(struct bat_priv *bat_priv,
			  struct hard_iface *primary_if,
			  uint8_t *backbone_addr,
			  struct ethhdr *ethhdr, short vid)
{
	/* check for REQUEST frame */
	if (!compare_eth(backbone_addr, ethhdr->h_dest))
		return 0;

	/* sanity check, this should not happen on a normal switch,
	 * we ignore it in this case.
	 */
	if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
		return 1;

	bat_dbg(DBG_BLA, bat_priv,
		"handle_request(): REQUEST vid %d (sent by %pM)...\n",
		vid, ethhdr->h_source);

	bla_answer_request(bat_priv, primary_if, vid);
	return 1;
}

/* check for UNCLAIM frame, return 1 if handled */
static int handle_unclaim(struct bat_priv *bat_priv,
			  struct hard_iface *primary_if,
			  uint8_t *backbone_addr,
			  uint8_t *claim_addr, short vid)
{
	struct backbone_gw *backbone_gw;

	/* unclaim in any case if it is our own */
	if (primary_if && compare_eth(backbone_addr,
				      primary_if->net_dev->dev_addr))
		bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL);

	backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid);

	if (!backbone_gw)
		return 1;

	/* this must be an UNCLAIM frame */
	bat_dbg(DBG_BLA, bat_priv,
		"handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
		claim_addr, vid, backbone_gw->orig);

	bla_del_claim(bat_priv, claim_addr, vid);
	backbone_gw_free_ref(backbone_gw);
	return 1;
}

/* check for CLAIM frame, return 1 if handled */
static int handle_claim(struct bat_priv *bat_priv,
			struct hard_iface *primary_if, uint8_t *backbone_addr,
			uint8_t *claim_addr, short vid)
{
	struct backbone_gw *backbone_gw;

	/* register the gateway if not yet available, and add the claim. */

	backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);

	if (unlikely(!backbone_gw))
		return 1;

	/* this must be a CLAIM frame */
	bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
	if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD);

	/* TODO: we could call something like tt_local_del() here. */

	backbone_gw_free_ref(backbone_gw);
	return 1;
}

/**
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 *
 * Check if this is a claim frame, and process it accordingly.
 *
 * returns 1 if it was a claim frame, otherwise return 0 to
 * tell the caller that it can use the frame on its own.
 */
static int bla_process_claim(struct bat_priv *bat_priv,
			     struct hard_iface *primary_if,
			     struct sk_buff *skb)
{
	struct ethhdr *ethhdr;
	struct vlan_ethhdr *vhdr;
	struct arphdr *arphdr;
	uint8_t *hw_src, *hw_dst;
	struct bla_claim_dst *bla_dst;
	uint16_t proto;
	int headlen;
	short vid = -1;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
		vhdr = (struct vlan_ethhdr *)ethhdr;
		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
		proto = ntohs(vhdr->h_vlan_encapsulated_proto);
		headlen = sizeof(*vhdr);
	} else {
		proto = ntohs(ethhdr->h_proto);
		headlen = sizeof(*ethhdr);
	}

	if (proto != ETH_P_ARP)
		return 0; /* not a claim frame */

	/* this must be an ARP frame. check if it is a claim. */

	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
		return 0;

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);

	/* Check whether the ARP frame carries valid
	 * IP information
	 */

	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
		return 0;
	if (arphdr->ar_pro != htons(ETH_P_IP))
		return 0;
	if (arphdr->ar_hln != ETH_ALEN)
		return 0;
	if (arphdr->ar_pln != 4)
		return 0;

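	/* in an IPv4-over-ethernet ARP packet, the sender hw address directly
	 * follows the arp header, and the target hw address follows the
	 * sender hw and sender ip addresses (ETH_ALEN + 4 bytes further)
	 */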
	hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
	hw_dst = hw_src + ETH_ALEN + 4;
	bla_dst = (struct bla_claim_dst *)hw_dst;

	/* check if it is a claim frame. */
	if (memcmp(hw_dst, claim_dest, 3) != 0)
		return 0;

	/* become a backbone gw ourselves on this vlan if not happened yet */
	bla_update_own_backbone_gw(bat_priv, primary_if, vid);

	/* check for the different types of claim frames ... */
	switch (bla_dst->type) {
	case CLAIM_TYPE_ADD:
		if (handle_claim(bat_priv, primary_if, hw_src,
				 ethhdr->h_source, vid))
			return 1;
		break;
	case CLAIM_TYPE_DEL:
		if (handle_unclaim(bat_priv, primary_if,
				   ethhdr->h_source, hw_src, vid))
			return 1;
		break;

	case CLAIM_TYPE_ANNOUNCE:
		if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid))
			return 1;
		break;
	case CLAIM_TYPE_REQUEST:
		if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid))
			return 1;
		break;
	}

	bat_dbg(DBG_BLA, bat_priv,
		"bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
		ethhdr->h_source, vid, hw_src, hw_dst);
	return 1;
}

/* Check when we last heard from other nodes, and remove them in case of
 * a time out, or clean all backbone gws if now is set.
 */
static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
{
	struct backbone_gw *backbone_gw;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	struct hashtable_t *hash;
	spinlock_t *list_lock;	/* protects write access to the hash lists */
	int i;

	hash = bat_priv->backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
					  head, hash_entry) {
			if (now)
				goto purge_now;
			if (!has_timed_out(backbone_gw->lasttime,
					   BLA_BACKBONE_TIMEOUT))
				continue;

			bat_dbg(DBG_BLA, backbone_gw->bat_priv,
				"bla_purge_backbone_gw(): backbone gw %pM timed out\n",
				backbone_gw->orig);

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
				atomic_dec(&bat_priv->bla_num_requests);

			bla_del_backbone_claims(backbone_gw);

			hlist_del_rcu(node);
			backbone_gw_free_ref(backbone_gw);
		}
		spin_unlock_bh(list_lock);
	}
}

/**
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface, may be NULL if now is set
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we last heard from our own claims, and remove them in case of
 * a time out, or clean all claims if now is set.
 */
static void bla_purge_claims(struct bat_priv *bat_priv,
			     struct hard_iface *primary_if, int now)
{
	struct claim *claim;
	struct hlist_node *node;
	struct hlist_head *head;
	struct hashtable_t *hash;
	int i;

	hash = bat_priv->claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
			if (now)
				goto purge_now;
			if (!compare_eth(claim->backbone_gw->orig,
					 primary_if->net_dev->dev_addr))
				continue;
			if (!has_timed_out(claim->lasttime,
					   BLA_CLAIM_TIMEOUT))
				continue;

			bat_dbg(DBG_BLA, bat_priv,
				"bla_purge_claims(): %pM, vid %d, time out\n",
				claim->addr, claim->vid);

purge_now:
			handle_unclaim(bat_priv, primary_if,
				       claim->backbone_gw->orig,
				       claim->addr, claim->vid);
		}
		rcu_read_unlock();
	}
}

/**
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 *
 * Update the backbone gateways when our own originator address changes.
 */
void bla_update_orig_address(struct bat_priv *bat_priv,
			     struct hard_iface *primary_if,
			     struct hard_iface *oldif)
{
	struct backbone_gw *backbone_gw;
	struct hlist_node *node;
	struct hlist_head *head;
	struct hashtable_t *hash;
	int i;

	if (!oldif) {
		bla_purge_claims(bat_priv, NULL, 1);
		bla_purge_backbone_gw(bat_priv, 1);
		return;
	}

	hash = bat_priv->backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
			/* own orig still holds the old value. */
			if (!compare_eth(backbone_gw->orig,
					 oldif->net_dev->dev_addr))
				continue;

			memcpy(backbone_gw->orig,
			       primary_if->net_dev->dev_addr, ETH_ALEN);
			/* send an announce frame so others will ask for our
			 * claims and update their tables.
			 */
			bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
}

/* (re)start the timer */
static void bla_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
	queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
			   msecs_to_jiffies(BLA_PERIOD_LENGTH));
}

/* periodic work to do:
 * * purge structures when they are too old
 * * send announcements
 */
static void bla_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, bla_work);
	struct hlist_node *node;
	struct hlist_head *head;
	struct backbone_gw *backbone_gw;
	struct hashtable_t *hash;
	struct hard_iface *primary_if;
	int i;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	bla_purge_claims(bat_priv, primary_if, 0);
	bla_purge_backbone_gw(bat_priv, 0);

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

	hash = bat_priv->backbone_hash;
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
			if (!compare_eth(backbone_gw->orig,
					 primary_if->net_dev->dev_addr))
				continue;

			backbone_gw->lasttime = jiffies;

			bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		hardif_free_ref(primary_if);

	bla_start_timer(bat_priv);
}

/* initialize all bla structures */
int bla_init(struct bat_priv *bat_priv)
{
	int i;

	bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");

	/* initialize the duplicate list */
	for (i = 0; i < DUPLIST_SIZE; i++)
		bat_priv->bcast_duplist[i].entrytime =
			jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT);
	bat_priv->bcast_duplist_curr = 0;

	if (bat_priv->claim_hash)
		return 1;

	bat_priv->claim_hash = hash_new(128);
	bat_priv->backbone_hash = hash_new(32);

	if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
		return -1;

	bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");

	bla_start_timer(bat_priv);
	return 1;
}

/**
 * @bat_priv: the bat priv with all the soft interface information
 * @bcast_packet: the broadcast packet to be checked
 * @hdr_size: maximum length of the frame
 *
 * check if it is on our broadcast list. Another gateway might
 * have sent the same packet because it is connected to the same backbone,
 * so we have to remove this duplicate.
 *
 * This is performed by checking the CRC, which will tell us
 * with a good chance that it is the same packet. If it is furthermore
 * sent by another host, drop it. We allow equal packets from
 * the same host, however, as this might be intended.
 */
int bla_check_bcast_duplist(struct bat_priv *bat_priv,
			    struct bcast_packet *bcast_packet,
			    int hdr_size)
{
	int i, length, curr;
	uint8_t *content;
	uint16_t crc;
	struct bcast_duplist_entry *entry;

	length = hdr_size - sizeof(*bcast_packet);
	content = (uint8_t *)bcast_packet;
	content += sizeof(*bcast_packet);

	/* calculate the crc ... */
	crc = crc16(0, content, length);

	for (i = 0; i < DUPLIST_SIZE; i++) {
		curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE;
		entry = &bat_priv->bcast_duplist[curr];

		/* we can stop searching if the entry is too old;
		 * later entries will be even older
		 */
		if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT))
			break;

		if (entry->crc != crc)
			continue;

		if (compare_eth(entry->orig, bcast_packet->orig))
			continue;

		/* this entry seems to match: same crc, not too old,
		 * and from another gw. therefore return 1 to forbid it.
		 */
		return 1;
	}
	/* not found, add a new entry (overwrite the oldest entry) */
	curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
	entry = &bat_priv->bcast_duplist[curr];
	entry->crc = crc;
	entry->entrytime = jiffies;
	memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
	bat_priv->bcast_duplist_curr = curr;

	/* allow it, it's the first occurrence. */
	return 0;
}

/**
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: originator mac address
 *
 * check if the originator is a gateway for any VLAN ID.
 *
 * returns 1 if it is found, 0 otherwise
 */
int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
{
	struct hashtable_t *hash = bat_priv->backbone_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct backbone_gw *backbone_gw;
	int i;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		return 0;

	if (!hash)
		return 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
			if (compare_eth(backbone_gw->orig, orig)) {
				rcu_read_unlock();
				return 1;
			}
		}
		rcu_read_unlock();
	}

	return 0;
}

/**
 * @skb: the frame to be checked
 * @orig_node: the orig_node of the frame
 * @hdr_size: maximum length of the frame
 *
 * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
 * if the orig_node is also a gateway on the soft interface, otherwise it
 * returns 0.
 */
int bla_is_backbone_gw(struct sk_buff *skb,
		       struct orig_node *orig_node, int hdr_size)
{
	struct ethhdr *ethhdr;
	struct vlan_ethhdr *vhdr;
	struct backbone_gw *backbone_gw;
	short vid = -1;

	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
		return 0;

	/* first, find out the vid. */
	if (!pskb_may_pull(skb, hdr_size + sizeof(struct ethhdr)))
		return 0;

	ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);

	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
		if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
			return 0;

		vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
					      hdr_size);
		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
	}

	/* see if this originator is a backbone gw for this VLAN */
	backbone_gw = backbone_hash_find(orig_node->bat_priv,
					 orig_node->orig, vid);
	if (!backbone_gw)
		return 0;

	backbone_gw_free_ref(backbone_gw);
	return 1;
}

/* free all bla structures (on soft interface free or module unload) */
void bla_free(struct bat_priv *bat_priv)
{
	struct hard_iface *primary_if;

	cancel_delayed_work_sync(&bat_priv->bla_work);
	primary_if = primary_if_get_selected(bat_priv);

	if (bat_priv->claim_hash) {
		bla_purge_claims(bat_priv, primary_if, 1);
		hash_destroy(bat_priv->claim_hash);
		bat_priv->claim_hash = NULL;
	}
	if (bat_priv->backbone_hash) {
		bla_purge_backbone_gw(bat_priv, 1);
		hash_destroy(bat_priv->backbone_hash);
		bat_priv->backbone_hash = NULL;
	}
	if (primary_if)
		hardif_free_ref(primary_if);
}

/**
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * bla_rx avoidance checks if:
 *  * we have to race for a claim
 *  * the frame is allowed on the LAN
 *
 * in these cases, the skb is further handled by this function and
 * returns 1, otherwise it returns 0 and the caller shall further
 * process the skb.
 */
int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
{
	struct ethhdr *ethhdr;
	struct claim search_claim, *claim = NULL;
	struct hard_iface *primary_if;
	int ret;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto handled;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
	search_claim.vid = vid;
	claim = claim_hash_find(bat_priv, &search_claim);

	if (!claim) {
		/* possible optimization: race for a claim */
		/* No claim exists yet, claim it for us! */
		handle_claim(bat_priv, primary_if,
			     primary_if->net_dev->dev_addr,
			     ethhdr->h_source, vid);
		goto allow;
	}

	/* if it is our own claim ... */
	if (compare_eth(claim->backbone_gw->orig,
			primary_if->net_dev->dev_addr)) {
		/* ... allow it in any case */
		claim->lasttime = jiffies;
		goto allow;
	}

	/* if it is a broadcast ... */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* ... drop it. the responsible gateway is in charge. */
		goto handled;
	} else {
		/* seems the client considers us as its best gateway.
		 * send a claim and update the claim table
		 * immediately.
		 */
		handle_claim(bat_priv, primary_if,
			     primary_if->net_dev->dev_addr,
			     ethhdr->h_source, vid);
		goto allow;
	}
allow:
	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;

handled:
	kfree_skb(skb);
	ret = 1;

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	if (claim)
		claim_free_ref(claim);
	return ret;
}

/**
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * bla_tx checks if:
 *  * a claim was received which has to be processed
 *  * the frame is allowed on the mesh
 *
 * in these cases, the skb is further handled by this function and
 * returns 1, otherwise it returns 0 and the caller shall further
 * process the skb.
 */
int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
{
	struct ethhdr *ethhdr;
	struct claim search_claim, *claim = NULL;
	struct hard_iface *primary_if;
	int ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	/* in VLAN case, the mac header might not be set. */
	skb_reset_mac_header(skb);

	if (bla_process_claim(bat_priv, primary_if, skb))
		goto handled;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
	search_claim.vid = vid;

	claim = claim_hash_find(bat_priv, &search_claim);

	/* if no claim exists, allow it. */
	if (!claim)
		goto allow;

	/* check if we are responsible. */
	if (compare_eth(claim->backbone_gw->orig,
			primary_if->net_dev->dev_addr)) {
		/* if yes, the client has roamed and we have
		 * to unclaim it.
		 */
		handle_unclaim(bat_priv, primary_if,
			       primary_if->net_dev->dev_addr,
			       ethhdr->h_source, vid);
		goto allow;
	}

	/* check if it is a multicast/broadcast frame */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* drop it. the responsible gateway has forwarded it into
		 * the backbone network.
		 */
		goto handled;
	} else {
		/* we must allow it. at least if we are
		 * responsible for the DESTINATION.
		 */
		goto allow;
	}
allow:
	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;
handled:
	ret = 1;
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	if (claim)
		claim_free_ref(claim);
	return ret;
}

int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->claim_hash;
	struct claim *claim;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	bool is_own;
	int ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "Claims announced for the mesh %s (orig %pM)\n",
		   net_dev->name, primary_if->net_dev->dev_addr);
	seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n",
		   "Client", "VID", "Originator", "CRC");
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
			is_own = compare_eth(claim->backbone_gw->orig,
					     primary_if->net_dev->dev_addr);
			seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n",
				   claim->addr, claim->vid,
				   claim->backbone_gw->orig,
				   (is_own ? 'x' : ' '),
				   claim->backbone_gw->crc);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}