blob: b1b1773afa0b26e045c4d59589831b9d7b0e2573 [file] [log] [blame]
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001/*
Sven Eckelmann64afe352011-01-27 10:38:15 +01002 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00003 *
4 * Marek Lindner, Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
/* originator table bookkeeping: creation, neighbor reference handling and
 * periodic purging of orig_node entries */
23
24#include "main.h"
25#include "originator.h"
26#include "hash.h"
27#include "translation-table.h"
28#include "routing.h"
29#include "gateway_client.h"
30#include "hard-interface.h"
31#include "unicast.h"
32#include "soft-interface.h"
33
/* forward declaration: the purge worker is defined below but must be known
 * here so the timer can be armed */
static void purge_orig(struct work_struct *work);

/* (re)arm the delayed work that purges stale originators; it fires on the
 * batman event workqueue after one second (1 * HZ jiffies) */
static void start_purge_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
	queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
}
41
42int originator_init(struct bat_priv *bat_priv)
43{
44 if (bat_priv->orig_hash)
45 return 1;
46
47 spin_lock_bh(&bat_priv->orig_hash_lock);
48 bat_priv->orig_hash = hash_new(1024);
49
50 if (!bat_priv->orig_hash)
51 goto err;
52
53 spin_unlock_bh(&bat_priv->orig_hash_lock);
54 start_purge_timer(bat_priv);
55 return 1;
56
57err:
58 spin_unlock_bh(&bat_priv->orig_hash_lock);
59 return 0;
60}
61
Marek Lindnera8e7f4b2010-12-12 21:57:10 +000062void neigh_node_free_ref(struct kref *refcount)
63{
64 struct neigh_node *neigh_node;
65
66 neigh_node = container_of(refcount, struct neigh_node, refcount);
67 kfree(neigh_node);
68}
69
70struct neigh_node *create_neighbor(struct orig_node *orig_node,
71 struct orig_node *orig_neigh_node,
72 uint8_t *neigh,
73 struct batman_if *if_incoming)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000074{
75 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
76 struct neigh_node *neigh_node;
77
78 bat_dbg(DBG_BATMAN, bat_priv,
79 "Creating new last-hop neighbor of originator\n");
80
81 neigh_node = kzalloc(sizeof(struct neigh_node), GFP_ATOMIC);
82 if (!neigh_node)
83 return NULL;
84
85 INIT_LIST_HEAD(&neigh_node->list);
86
87 memcpy(neigh_node->addr, neigh, ETH_ALEN);
88 neigh_node->orig_node = orig_neigh_node;
89 neigh_node->if_incoming = if_incoming;
Marek Lindnera8e7f4b2010-12-12 21:57:10 +000090 kref_init(&neigh_node->refcount);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000091
92 list_add_tail(&neigh_node->list, &orig_node->neigh_list);
93 return neigh_node;
94}
95
96static void free_orig_node(void *data, void *arg)
97{
98 struct list_head *list_pos, *list_pos_tmp;
99 struct neigh_node *neigh_node;
100 struct orig_node *orig_node = (struct orig_node *)data;
101 struct bat_priv *bat_priv = (struct bat_priv *)arg;
102
103 /* for all neighbors towards this originator ... */
104 list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
105 neigh_node = list_entry(list_pos, struct neigh_node, list);
106
107 list_del(list_pos);
Marek Lindnera8e7f4b2010-12-12 21:57:10 +0000108 kref_put(&neigh_node->refcount, neigh_node_free_ref);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000109 }
110
111 frag_list_free(&orig_node->frag_list);
112 hna_global_del_orig(bat_priv, orig_node, "originator timed out");
113
114 kfree(orig_node->bcast_own);
115 kfree(orig_node->bcast_own_sum);
116 kfree(orig_node);
117}
118
/* tear down the originator table: stop the purge worker, then free every
 * orig_node and the hash itself under the hash lock */
void originator_free(struct bat_priv *bat_priv)
{
	if (!bat_priv->orig_hash)
		return;

	/* ensure no purge work is running or queued beyond this point */
	cancel_delayed_work_sync(&bat_priv->orig_work);

	spin_lock_bh(&bat_priv->orig_hash_lock);
	hash_delete(bat_priv->orig_hash, free_orig_node, bat_priv);
	bat_priv->orig_hash = NULL;
	spin_unlock_bh(&bat_priv->orig_hash_lock);
}
131
132/* this function finds or creates an originator entry for the given
133 * address if it does not exits */
134struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
135{
136 struct orig_node *orig_node;
137 int size;
138 int hash_added;
139
140 orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
141 compare_orig, choose_orig,
142 addr));
143
144 if (orig_node)
145 return orig_node;
146
147 bat_dbg(DBG_BATMAN, bat_priv,
148 "Creating new originator: %pM\n", addr);
149
150 orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC);
151 if (!orig_node)
152 return NULL;
153
154 INIT_LIST_HEAD(&orig_node->neigh_list);
155
156 memcpy(orig_node->orig, addr, ETH_ALEN);
157 orig_node->router = NULL;
158 orig_node->hna_buff = NULL;
159 orig_node->bcast_seqno_reset = jiffies - 1
160 - msecs_to_jiffies(RESET_PROTECTION_MS);
161 orig_node->batman_seqno_reset = jiffies - 1
162 - msecs_to_jiffies(RESET_PROTECTION_MS);
163
164 size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;
165
166 orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
167 if (!orig_node->bcast_own)
168 goto free_orig_node;
169
170 size = bat_priv->num_ifaces * sizeof(uint8_t);
171 orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);
172
173 INIT_LIST_HEAD(&orig_node->frag_list);
174 orig_node->last_frag_packet = 0;
175
176 if (!orig_node->bcast_own_sum)
177 goto free_bcast_own;
178
179 hash_added = hash_add(bat_priv->orig_hash, compare_orig, choose_orig,
180 orig_node);
181 if (hash_added < 0)
182 goto free_bcast_own_sum;
183
184 return orig_node;
185free_bcast_own_sum:
186 kfree(orig_node->bcast_own_sum);
187free_bcast_own:
188 kfree(orig_node->bcast_own);
189free_orig_node:
190 kfree(orig_node);
191 return NULL;
192}
193
194static bool purge_orig_neighbors(struct bat_priv *bat_priv,
195 struct orig_node *orig_node,
196 struct neigh_node **best_neigh_node)
197{
198 struct list_head *list_pos, *list_pos_tmp;
199 struct neigh_node *neigh_node;
200 bool neigh_purged = false;
201
202 *best_neigh_node = NULL;
203
204 /* for all neighbors towards this originator ... */
205 list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
206 neigh_node = list_entry(list_pos, struct neigh_node, list);
207
208 if ((time_after(jiffies,
209 neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
210 (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
211 (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
212
213 if (neigh_node->if_incoming->if_status ==
214 IF_TO_BE_REMOVED)
215 bat_dbg(DBG_BATMAN, bat_priv,
216 "neighbor purge: originator %pM, "
217 "neighbor: %pM, iface: %s\n",
218 orig_node->orig, neigh_node->addr,
219 neigh_node->if_incoming->net_dev->name);
220 else
221 bat_dbg(DBG_BATMAN, bat_priv,
222 "neighbor timeout: originator %pM, "
223 "neighbor: %pM, last_valid: %lu\n",
224 orig_node->orig, neigh_node->addr,
225 (neigh_node->last_valid / HZ));
226
227 neigh_purged = true;
228 list_del(list_pos);
Marek Lindnera8e7f4b2010-12-12 21:57:10 +0000229 kref_put(&neigh_node->refcount, neigh_node_free_ref);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000230 } else {
231 if ((!*best_neigh_node) ||
232 (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
233 *best_neigh_node = neigh_node;
234 }
235 }
236 return neigh_purged;
237}
238
239static bool purge_orig_node(struct bat_priv *bat_priv,
240 struct orig_node *orig_node)
241{
242 struct neigh_node *best_neigh_node;
243
244 if (time_after(jiffies,
245 orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {
246
247 bat_dbg(DBG_BATMAN, bat_priv,
248 "Originator timeout: originator %pM, last_valid %lu\n",
249 orig_node->orig, (orig_node->last_valid / HZ));
250 return true;
251 } else {
252 if (purge_orig_neighbors(bat_priv, orig_node,
253 &best_neigh_node)) {
254 update_routes(bat_priv, orig_node,
255 best_neigh_node,
256 orig_node->hna_buff,
257 orig_node->hna_buff_len);
258 /* update bonding candidates, we could have lost
259 * some candidates. */
Simon Wunderlich74ef1152010-12-29 16:15:19 +0000260 update_bonding_candidates(orig_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000261 }
262 }
263
264 return false;
265}
266
267static void _purge_orig(struct bat_priv *bat_priv)
268{
269 struct hashtable_t *hash = bat_priv->orig_hash;
270 struct hlist_node *walk, *safe;
271 struct hlist_head *head;
272 struct element_t *bucket;
273 struct orig_node *orig_node;
274 int i;
275
276 if (!hash)
277 return;
278
279 spin_lock_bh(&bat_priv->orig_hash_lock);
280
281 /* for all origins... */
282 for (i = 0; i < hash->size; i++) {
283 head = &hash->table[i];
284
285 hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
286 orig_node = bucket->data;
287
288 if (purge_orig_node(bat_priv, orig_node)) {
289 if (orig_node->gw_flags)
290 gw_node_delete(bat_priv, orig_node);
291 hlist_del(walk);
292 kfree(bucket);
293 free_orig_node(orig_node, bat_priv);
294 }
295
296 if (time_after(jiffies, orig_node->last_frag_packet +
297 msecs_to_jiffies(FRAG_TIMEOUT)))
298 frag_list_free(&orig_node->frag_list);
299 }
300 }
301
302 spin_unlock_bh(&bat_priv->orig_hash_lock);
303
304 gw_node_purge(bat_priv);
305 gw_election(bat_priv);
306
307 softif_neigh_purge(bat_priv);
308}
309
/* delayed-work handler: recover bat_priv from the embedded work struct,
 * run one purge pass, then re-arm the one-second purge timer */
static void purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, orig_work);

	_purge_orig(bat_priv);
	start_purge_timer(bat_priv);
}
320
/* external entry point to trigger an immediate purge pass (does not touch
 * the periodic timer) */
void purge_orig_ref(struct bat_priv *bat_priv)
{
	_purge_orig(bat_priv);
}
325
326int orig_seq_print_text(struct seq_file *seq, void *offset)
327{
328 struct net_device *net_dev = (struct net_device *)seq->private;
329 struct bat_priv *bat_priv = netdev_priv(net_dev);
330 struct hashtable_t *hash = bat_priv->orig_hash;
331 struct hlist_node *walk;
332 struct hlist_head *head;
333 struct element_t *bucket;
334 struct orig_node *orig_node;
335 struct neigh_node *neigh_node;
336 int batman_count = 0;
337 int last_seen_secs;
338 int last_seen_msecs;
339 int i;
340
341 if ((!bat_priv->primary_if) ||
342 (bat_priv->primary_if->if_status != IF_ACTIVE)) {
343 if (!bat_priv->primary_if)
344 return seq_printf(seq, "BATMAN mesh %s disabled - "
345 "please specify interfaces to enable it\n",
346 net_dev->name);
347
348 return seq_printf(seq, "BATMAN mesh %s "
349 "disabled - primary interface not active\n",
350 net_dev->name);
351 }
352
353 seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
354 SOURCE_VERSION, REVISION_VERSION_STR,
355 bat_priv->primary_if->net_dev->name,
356 bat_priv->primary_if->net_dev->dev_addr, net_dev->name);
357 seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
358 "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
359 "outgoingIF", "Potential nexthops");
360
361 spin_lock_bh(&bat_priv->orig_hash_lock);
362
363 for (i = 0; i < hash->size; i++) {
364 head = &hash->table[i];
365
366 hlist_for_each_entry(bucket, walk, head, hlist) {
367 orig_node = bucket->data;
368
369 if (!orig_node->router)
370 continue;
371
372 if (orig_node->router->tq_avg == 0)
373 continue;
374
375 last_seen_secs = jiffies_to_msecs(jiffies -
376 orig_node->last_valid) / 1000;
377 last_seen_msecs = jiffies_to_msecs(jiffies -
378 orig_node->last_valid) % 1000;
379
380 neigh_node = orig_node->router;
381 seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
382 orig_node->orig, last_seen_secs,
383 last_seen_msecs, neigh_node->tq_avg,
384 neigh_node->addr,
385 neigh_node->if_incoming->net_dev->name);
386
387 list_for_each_entry(neigh_node, &orig_node->neigh_list,
388 list) {
389 seq_printf(seq, " %pM (%3i)", neigh_node->addr,
390 neigh_node->tq_avg);
391 }
392
393 seq_printf(seq, "\n");
394 batman_count++;
395 }
396 }
397
398 spin_unlock_bh(&bat_priv->orig_hash_lock);
399
400 if ((batman_count == 0))
401 seq_printf(seq, "No batman nodes in range ...\n");
402
403 return 0;
404}
405
406static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
407{
408 void *data_ptr;
409
410 data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
411 GFP_ATOMIC);
412 if (!data_ptr) {
413 pr_err("Can't resize orig: out of memory\n");
414 return -1;
415 }
416
417 memcpy(data_ptr, orig_node->bcast_own,
418 (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
419 kfree(orig_node->bcast_own);
420 orig_node->bcast_own = data_ptr;
421
422 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
423 if (!data_ptr) {
424 pr_err("Can't resize orig: out of memory\n");
425 return -1;
426 }
427
428 memcpy(data_ptr, orig_node->bcast_own_sum,
429 (max_if_num - 1) * sizeof(uint8_t));
430 kfree(orig_node->bcast_own_sum);
431 orig_node->bcast_own_sum = data_ptr;
432
433 return 0;
434}
435
436int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
437{
438 struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
439 struct hashtable_t *hash = bat_priv->orig_hash;
440 struct hlist_node *walk;
441 struct hlist_head *head;
442 struct element_t *bucket;
443 struct orig_node *orig_node;
444 int i;
445
446 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
447 * if_num */
448 spin_lock_bh(&bat_priv->orig_hash_lock);
449
450 for (i = 0; i < hash->size; i++) {
451 head = &hash->table[i];
452
453 hlist_for_each_entry(bucket, walk, head, hlist) {
454 orig_node = bucket->data;
455
456 if (orig_node_add_if(orig_node, max_if_num) == -1)
457 goto err;
458 }
459 }
460
461 spin_unlock_bh(&bat_priv->orig_hash_lock);
462 return 0;
463
464err:
465 spin_unlock_bh(&bat_priv->orig_hash_lock);
466 return -ENOMEM;
467}
468
469static int orig_node_del_if(struct orig_node *orig_node,
470 int max_if_num, int del_if_num)
471{
472 void *data_ptr = NULL;
473 int chunk_size;
474
475 /* last interface was removed */
476 if (max_if_num == 0)
477 goto free_bcast_own;
478
479 chunk_size = sizeof(unsigned long) * NUM_WORDS;
480 data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
481 if (!data_ptr) {
482 pr_err("Can't resize orig: out of memory\n");
483 return -1;
484 }
485
486 /* copy first part */
487 memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
488
489 /* copy second part */
490 memcpy(data_ptr + del_if_num * chunk_size,
491 orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
492 (max_if_num - del_if_num) * chunk_size);
493
494free_bcast_own:
495 kfree(orig_node->bcast_own);
496 orig_node->bcast_own = data_ptr;
497
498 if (max_if_num == 0)
499 goto free_own_sum;
500
501 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
502 if (!data_ptr) {
503 pr_err("Can't resize orig: out of memory\n");
504 return -1;
505 }
506
507 memcpy(data_ptr, orig_node->bcast_own_sum,
508 del_if_num * sizeof(uint8_t));
509
510 memcpy(data_ptr + del_if_num * sizeof(uint8_t),
511 orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
512 (max_if_num - del_if_num) * sizeof(uint8_t));
513
514free_own_sum:
515 kfree(orig_node->bcast_own_sum);
516 orig_node->bcast_own_sum = data_ptr;
517
518 return 0;
519}
520
/* shrink the bcast_own(_sum) buffers of every originator after interface
 * @batman_if was removed (max_if_num is the new interface count), then
 * renumber the remaining interfaces on the same soft interface; returns 0
 * on success, -ENOMEM when any per-node resize failed */
int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct batman_if *batman_if_tmp;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			orig_node = bucket->data;

			ret = orig_node_del_if(orig_node, max_if_num,
					batman_if->if_num);

			if (ret == -1)
				goto err;
		}
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if_tmp, &if_list, list) {
		if (batman_if_tmp->if_status == IF_NOT_IN_USE)
			continue;

		/* skip the interface being removed itself */
		if (batman_if == batman_if_tmp)
			continue;

		/* only interfaces belonging to the same mesh are affected */
		if (batman_if->soft_iface != batman_if_tmp->soft_iface)
			continue;

		/* close the numbering gap left by the removed interface */
		if (batman_if_tmp->if_num > batman_if->if_num)
			batman_if_tmp->if_num--;
	}
	rcu_read_unlock();

	/* mark the removed interface as unnumbered */
	batman_if->if_num = -1;
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return 0;

err:
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return -ENOMEM;
}