blob: 5c32314f8279be3d2f1fe880779526077913bbb0 [file] [log] [blame]
/*
 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
21
/* originator table management: creating, purging and resizing entries for
 * the mesh nodes (originators) known to this interface */
23
24#include "main.h"
25#include "originator.h"
26#include "hash.h"
27#include "translation-table.h"
28#include "routing.h"
29#include "gateway_client.h"
30#include "hard-interface.h"
31#include "unicast.h"
32#include "soft-interface.h"
33
/* periodic purge worker, defined below; scheduled by start_purge_timer() */
static void purge_orig(struct work_struct *work);

/* (re)arm the periodic originator purge to fire in one second on the
 * batman event workqueue.
 * NOTE(review): INIT_DELAYED_WORK runs on every rearm, not just once at
 * setup - looks redundant after the first call; confirm intent before
 * changing, as purge_orig() rearms through this path every period. */
static void start_purge_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
	queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
}
41
42int originator_init(struct bat_priv *bat_priv)
43{
44 if (bat_priv->orig_hash)
45 return 1;
46
47 spin_lock_bh(&bat_priv->orig_hash_lock);
48 bat_priv->orig_hash = hash_new(1024);
49
50 if (!bat_priv->orig_hash)
51 goto err;
52
53 spin_unlock_bh(&bat_priv->orig_hash_lock);
54 start_purge_timer(bat_priv);
55 return 1;
56
57err:
58 spin_unlock_bh(&bat_priv->orig_hash_lock);
59 return 0;
60}
61
Marek Lindnera8e7f4b2010-12-12 21:57:10 +000062void neigh_node_free_ref(struct kref *refcount)
63{
64 struct neigh_node *neigh_node;
65
66 neigh_node = container_of(refcount, struct neigh_node, refcount);
67 kfree(neigh_node);
68}
69
Marek Lindnerf987ed62010-12-12 21:57:12 +000070static void neigh_node_free_rcu(struct rcu_head *rcu)
71{
72 struct neigh_node *neigh_node;
73
74 neigh_node = container_of(rcu, struct neigh_node, rcu);
75 kref_put(&neigh_node->refcount, neigh_node_free_ref);
76}
77
Marek Lindnera8e7f4b2010-12-12 21:57:10 +000078struct neigh_node *create_neighbor(struct orig_node *orig_node,
79 struct orig_node *orig_neigh_node,
80 uint8_t *neigh,
81 struct batman_if *if_incoming)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000082{
83 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
84 struct neigh_node *neigh_node;
85
86 bat_dbg(DBG_BATMAN, bat_priv,
87 "Creating new last-hop neighbor of originator\n");
88
89 neigh_node = kzalloc(sizeof(struct neigh_node), GFP_ATOMIC);
90 if (!neigh_node)
91 return NULL;
92
Marek Lindner9591a792010-12-12 21:57:11 +000093 INIT_HLIST_NODE(&neigh_node->list);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000094
95 memcpy(neigh_node->addr, neigh, ETH_ALEN);
96 neigh_node->orig_node = orig_neigh_node;
97 neigh_node->if_incoming = if_incoming;
Marek Lindnera8e7f4b2010-12-12 21:57:10 +000098 kref_init(&neigh_node->refcount);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000099
Marek Lindnerf987ed62010-12-12 21:57:12 +0000100 spin_lock_bh(&orig_node->neigh_list_lock);
101 hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
102 spin_unlock_bh(&orig_node->neigh_list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000103 return neigh_node;
104}
105
/* hash_delete()/purge callback: tear down one originator entry and
 * everything it owns. Both call sites in this file hold orig_hash_lock
 * when invoking it. */
static void free_orig_node(void *data, void *arg)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node;
	struct orig_node *orig_node = (struct orig_node *)data;
	struct bat_priv *bat_priv = (struct bat_priv *)arg;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {
		/* unlink under the lock; the kfree happens only after an
		 * RCU grace period via neigh_node_free_rcu() */
		hlist_del_rcu(&neigh_node->list);
		call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* release per-originator fragment buffers and global HNA entries */
	frag_list_free(&orig_node->frag_list);
	hna_global_del_orig(bat_priv, orig_node, "originator timed out");

	kfree(orig_node->bcast_own);
	kfree(orig_node->bcast_own_sum);
	kfree(orig_node);
}
131
/* shut down the originator table: stop the periodic purge worker, then
 * free every orig_node via free_orig_node() and drop the hash itself */
void originator_free(struct bat_priv *bat_priv)
{
	/* nothing to do if originator_init() never ran (or already freed) */
	if (!bat_priv->orig_hash)
		return;

	/* wait for a running purge_orig() and cancel the pending one */
	cancel_delayed_work_sync(&bat_priv->orig_work);

	spin_lock_bh(&bat_priv->orig_hash_lock);
	hash_delete(bat_priv->orig_hash, free_orig_node, bat_priv);
	bat_priv->orig_hash = NULL;
	spin_unlock_bh(&bat_priv->orig_hash_lock);
}
144
145/* this function finds or creates an originator entry for the given
146 * address if it does not exits */
147struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
148{
149 struct orig_node *orig_node;
150 int size;
151 int hash_added;
152
Marek Lindnerfb778ea2011-01-19 20:01:40 +0000153 rcu_read_lock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000154 orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
155 compare_orig, choose_orig,
156 addr));
Marek Lindnerfb778ea2011-01-19 20:01:40 +0000157 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000158
159 if (orig_node)
160 return orig_node;
161
162 bat_dbg(DBG_BATMAN, bat_priv,
163 "Creating new originator: %pM\n", addr);
164
165 orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC);
166 if (!orig_node)
167 return NULL;
168
Marek Lindner9591a792010-12-12 21:57:11 +0000169 INIT_HLIST_HEAD(&orig_node->neigh_list);
Marek Lindnerf987ed62010-12-12 21:57:12 +0000170 spin_lock_init(&orig_node->neigh_list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000171
172 memcpy(orig_node->orig, addr, ETH_ALEN);
173 orig_node->router = NULL;
174 orig_node->hna_buff = NULL;
175 orig_node->bcast_seqno_reset = jiffies - 1
176 - msecs_to_jiffies(RESET_PROTECTION_MS);
177 orig_node->batman_seqno_reset = jiffies - 1
178 - msecs_to_jiffies(RESET_PROTECTION_MS);
179
180 size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;
181
182 orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
183 if (!orig_node->bcast_own)
184 goto free_orig_node;
185
186 size = bat_priv->num_ifaces * sizeof(uint8_t);
187 orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);
188
189 INIT_LIST_HEAD(&orig_node->frag_list);
190 orig_node->last_frag_packet = 0;
191
192 if (!orig_node->bcast_own_sum)
193 goto free_bcast_own;
194
195 hash_added = hash_add(bat_priv->orig_hash, compare_orig, choose_orig,
196 orig_node);
197 if (hash_added < 0)
198 goto free_bcast_own_sum;
199
200 return orig_node;
201free_bcast_own_sum:
202 kfree(orig_node->bcast_own_sum);
203free_bcast_own:
204 kfree(orig_node->bcast_own);
205free_orig_node:
206 kfree(orig_node);
207 return NULL;
208}
209
210static bool purge_orig_neighbors(struct bat_priv *bat_priv,
211 struct orig_node *orig_node,
212 struct neigh_node **best_neigh_node)
213{
Marek Lindner9591a792010-12-12 21:57:11 +0000214 struct hlist_node *node, *node_tmp;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000215 struct neigh_node *neigh_node;
216 bool neigh_purged = false;
217
218 *best_neigh_node = NULL;
219
Marek Lindnerf987ed62010-12-12 21:57:12 +0000220 spin_lock_bh(&orig_node->neigh_list_lock);
221
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000222 /* for all neighbors towards this originator ... */
Marek Lindner9591a792010-12-12 21:57:11 +0000223 hlist_for_each_entry_safe(neigh_node, node, node_tmp,
224 &orig_node->neigh_list, list) {
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000225
226 if ((time_after(jiffies,
227 neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
228 (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
Marek Lindner1a241a52011-01-19 19:16:10 +0000229 (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000230 (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
231
Marek Lindner1a241a52011-01-19 19:16:10 +0000232 if ((neigh_node->if_incoming->if_status ==
233 IF_INACTIVE) ||
234 (neigh_node->if_incoming->if_status ==
235 IF_NOT_IN_USE) ||
236 (neigh_node->if_incoming->if_status ==
237 IF_TO_BE_REMOVED))
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000238 bat_dbg(DBG_BATMAN, bat_priv,
239 "neighbor purge: originator %pM, "
240 "neighbor: %pM, iface: %s\n",
241 orig_node->orig, neigh_node->addr,
242 neigh_node->if_incoming->net_dev->name);
243 else
244 bat_dbg(DBG_BATMAN, bat_priv,
245 "neighbor timeout: originator %pM, "
246 "neighbor: %pM, last_valid: %lu\n",
247 orig_node->orig, neigh_node->addr,
248 (neigh_node->last_valid / HZ));
249
250 neigh_purged = true;
Marek Lindner9591a792010-12-12 21:57:11 +0000251
Marek Lindnerf987ed62010-12-12 21:57:12 +0000252 hlist_del_rcu(&neigh_node->list);
253 call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000254 } else {
255 if ((!*best_neigh_node) ||
256 (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
257 *best_neigh_node = neigh_node;
258 }
259 }
Marek Lindnerf987ed62010-12-12 21:57:12 +0000260
261 spin_unlock_bh(&orig_node->neigh_list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000262 return neigh_purged;
263}
264
265static bool purge_orig_node(struct bat_priv *bat_priv,
266 struct orig_node *orig_node)
267{
268 struct neigh_node *best_neigh_node;
269
270 if (time_after(jiffies,
271 orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {
272
273 bat_dbg(DBG_BATMAN, bat_priv,
274 "Originator timeout: originator %pM, last_valid %lu\n",
275 orig_node->orig, (orig_node->last_valid / HZ));
276 return true;
277 } else {
278 if (purge_orig_neighbors(bat_priv, orig_node,
279 &best_neigh_node)) {
280 update_routes(bat_priv, orig_node,
281 best_neigh_node,
282 orig_node->hna_buff,
283 orig_node->hna_buff_len);
284 /* update bonding candidates, we could have lost
285 * some candidates. */
Simon Wunderlich74ef1152010-12-29 16:15:19 +0000286 update_bonding_candidates(orig_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000287 }
288 }
289
290 return false;
291}
292
/* walk the whole originator hash and remove every node that timed out;
 * afterwards refresh gateway state and purge soft-interface neighbors.
 * Holds orig_hash_lock for the whole scan plus the per-bucket list lock
 * while a bucket chain is modified. */
static void _purge_orig(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk, *safe;
	struct hlist_head *head;
	struct element_t *bucket;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	int i;

	/* table may not exist yet (or was already torn down) */
	if (!hash)
		return;

	spin_lock_bh(&bat_priv->orig_hash_lock);

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
			orig_node = bucket->data;

			if (purge_orig_node(bat_priv, orig_node)) {
				/* node expired: drop its gateway entry if
				 * any, unlink the bucket (freed after an
				 * RCU grace period) and free the node */
				if (orig_node->gw_flags)
					gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(walk);
				call_rcu(&bucket->rcu, bucket_free_rcu);
				free_orig_node(orig_node, bat_priv);
				continue;
			}

			/* node stays: still expire stale fragment buffers */
			if (time_after(jiffies, orig_node->last_frag_packet +
					msecs_to_jiffies(FRAG_TIMEOUT)))
				frag_list_free(&orig_node->frag_list);
		}
		spin_unlock_bh(list_lock);
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* gateway bookkeeping runs outside the hash lock */
	gw_node_purge(bat_priv);
	gw_election(bat_priv);

	softif_neigh_purge(bat_priv);
}
340
341static void purge_orig(struct work_struct *work)
342{
343 struct delayed_work *delayed_work =
344 container_of(work, struct delayed_work, work);
345 struct bat_priv *bat_priv =
346 container_of(delayed_work, struct bat_priv, orig_work);
347
348 _purge_orig(bat_priv);
349 start_purge_timer(bat_priv);
350}
351
/* run one purge pass on demand (without touching the periodic timer) */
void purge_orig_ref(struct bat_priv *bat_priv)
{
	_purge_orig(bat_priv);
}
356
357int orig_seq_print_text(struct seq_file *seq, void *offset)
358{
359 struct net_device *net_dev = (struct net_device *)seq->private;
360 struct bat_priv *bat_priv = netdev_priv(net_dev);
361 struct hashtable_t *hash = bat_priv->orig_hash;
Marek Lindner9591a792010-12-12 21:57:11 +0000362 struct hlist_node *walk, *node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000363 struct hlist_head *head;
364 struct element_t *bucket;
365 struct orig_node *orig_node;
366 struct neigh_node *neigh_node;
367 int batman_count = 0;
368 int last_seen_secs;
369 int last_seen_msecs;
370 int i;
371
372 if ((!bat_priv->primary_if) ||
373 (bat_priv->primary_if->if_status != IF_ACTIVE)) {
374 if (!bat_priv->primary_if)
375 return seq_printf(seq, "BATMAN mesh %s disabled - "
376 "please specify interfaces to enable it\n",
377 net_dev->name);
378
379 return seq_printf(seq, "BATMAN mesh %s "
380 "disabled - primary interface not active\n",
381 net_dev->name);
382 }
383
384 seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
385 SOURCE_VERSION, REVISION_VERSION_STR,
386 bat_priv->primary_if->net_dev->name,
387 bat_priv->primary_if->net_dev->dev_addr, net_dev->name);
388 seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
389 "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
390 "outgoingIF", "Potential nexthops");
391
392 spin_lock_bh(&bat_priv->orig_hash_lock);
393
394 for (i = 0; i < hash->size; i++) {
395 head = &hash->table[i];
396
Marek Lindnerfb778ea2011-01-19 20:01:40 +0000397 rcu_read_lock();
398 hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000399 orig_node = bucket->data;
400
401 if (!orig_node->router)
402 continue;
403
404 if (orig_node->router->tq_avg == 0)
405 continue;
406
407 last_seen_secs = jiffies_to_msecs(jiffies -
408 orig_node->last_valid) / 1000;
409 last_seen_msecs = jiffies_to_msecs(jiffies -
410 orig_node->last_valid) % 1000;
411
412 neigh_node = orig_node->router;
413 seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
414 orig_node->orig, last_seen_secs,
415 last_seen_msecs, neigh_node->tq_avg,
416 neigh_node->addr,
417 neigh_node->if_incoming->net_dev->name);
418
Marek Lindnerf987ed62010-12-12 21:57:12 +0000419 hlist_for_each_entry_rcu(neigh_node, node,
420 &orig_node->neigh_list, list) {
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000421 seq_printf(seq, " %pM (%3i)", neigh_node->addr,
422 neigh_node->tq_avg);
423 }
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000424
425 seq_printf(seq, "\n");
426 batman_count++;
427 }
Marek Lindnerfb778ea2011-01-19 20:01:40 +0000428 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000429 }
430
431 spin_unlock_bh(&bat_priv->orig_hash_lock);
432
433 if ((batman_count == 0))
434 seq_printf(seq, "No batman nodes in range ...\n");
435
436 return 0;
437}
438
439static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
440{
441 void *data_ptr;
442
443 data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
444 GFP_ATOMIC);
445 if (!data_ptr) {
446 pr_err("Can't resize orig: out of memory\n");
447 return -1;
448 }
449
450 memcpy(data_ptr, orig_node->bcast_own,
451 (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
452 kfree(orig_node->bcast_own);
453 orig_node->bcast_own = data_ptr;
454
455 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
456 if (!data_ptr) {
457 pr_err("Can't resize orig: out of memory\n");
458 return -1;
459 }
460
461 memcpy(data_ptr, orig_node->bcast_own_sum,
462 (max_if_num - 1) * sizeof(uint8_t));
463 kfree(orig_node->bcast_own_sum);
464 orig_node->bcast_own_sum = data_ptr;
465
466 return 0;
467}
468
/* a hard interface was added (interface count is now max_if_num): grow
 * every originator's per-interface arrays to match.
 * Returns 0 on success, -ENOMEM if any resize fails. */
int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	int i;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
			orig_node = bucket->data;

			if (orig_node_add_if(orig_node, max_if_num) == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return 0;

err:
	/* drop the RCU read section entered in the loop before bailing */
	rcu_read_unlock();
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return -ENOMEM;
}
504
505static int orig_node_del_if(struct orig_node *orig_node,
506 int max_if_num, int del_if_num)
507{
508 void *data_ptr = NULL;
509 int chunk_size;
510
511 /* last interface was removed */
512 if (max_if_num == 0)
513 goto free_bcast_own;
514
515 chunk_size = sizeof(unsigned long) * NUM_WORDS;
516 data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
517 if (!data_ptr) {
518 pr_err("Can't resize orig: out of memory\n");
519 return -1;
520 }
521
522 /* copy first part */
523 memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
524
525 /* copy second part */
526 memcpy(data_ptr + del_if_num * chunk_size,
527 orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
528 (max_if_num - del_if_num) * chunk_size);
529
530free_bcast_own:
531 kfree(orig_node->bcast_own);
532 orig_node->bcast_own = data_ptr;
533
534 if (max_if_num == 0)
535 goto free_own_sum;
536
537 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
538 if (!data_ptr) {
539 pr_err("Can't resize orig: out of memory\n");
540 return -1;
541 }
542
543 memcpy(data_ptr, orig_node->bcast_own_sum,
544 del_if_num * sizeof(uint8_t));
545
546 memcpy(data_ptr + del_if_num * sizeof(uint8_t),
547 orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
548 (max_if_num - del_if_num) * sizeof(uint8_t));
549
550free_own_sum:
551 kfree(orig_node->bcast_own_sum);
552 orig_node->bcast_own_sum = data_ptr;
553
554 return 0;
555}
556
/* a hard interface is being removed: shrink every originator's
 * per-interface arrays and renumber the remaining interfaces on the
 * same soft interface so indices stay dense.
 * Returns 0 on success, -ENOMEM if any resize fails. */
int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct batman_if *batman_if_tmp;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
			orig_node = bucket->data;

			ret = orig_node_del_if(orig_node, max_if_num,
					batman_if->if_num);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if_tmp, &if_list, list) {
		if (batman_if_tmp->if_status == IF_NOT_IN_USE)
			continue;

		if (batman_if == batman_if_tmp)
			continue;

		/* only interfaces on the same mesh are affected */
		if (batman_if->soft_iface != batman_if_tmp->soft_iface)
			continue;

		/* close the gap left by the removed interface's index */
		if (batman_if_tmp->if_num > batman_if->if_num)
			batman_if_tmp->if_num--;
	}
	rcu_read_unlock();

	batman_if->if_num = -1;
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return 0;

err:
	/* drop the RCU read section entered in the loop before bailing */
	rcu_read_unlock();
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return -ENOMEM;
}