blob: 8d1b16eb12d13974ad6cba87c360c0aef5a450a3 [file] [log] [blame]
Antonio Quartulli0b873932013-01-04 03:05:31 +01001/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00002 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000018 */
19
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000020#include "main.h"
Antonio Quartulli785ea112011-11-23 11:35:44 +010021#include "distributed-arp-table.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000022#include "originator.h"
23#include "hash.h"
24#include "translation-table.h"
25#include "routing.h"
26#include "gateway_client.h"
27#include "hard-interface.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000028#include "soft-interface.h"
Simon Wunderlich23721382012-01-22 20:00:19 +010029#include "bridge_loop_avoidance.h"
Martin Hundebølld56b1702013-01-25 11:12:39 +010030#include "network-coding.h"
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +020031#include "fragmentation.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000032
Antonio Quartullidec05072012-11-10 11:00:32 +010033/* hash class keys */
34static struct lock_class_key batadv_orig_hash_lock_class_key;
35
Sven Eckelmann03fc7f82012-05-12 18:34:00 +020036static void batadv_purge_orig(struct work_struct *work);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000037
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020038/* returns 1 if they are the same originator */
Antonio Quartullibbad0a52013-09-02 12:15:02 +020039int batadv_compare_orig(const struct hlist_node *node, const void *data2)
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020040{
Sven Eckelmann56303d32012-06-05 22:31:31 +020041 const void *data1 = container_of(node, struct batadv_orig_node,
42 hash_entry);
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020043
44 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
45}
46
/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist.
 *
 * On success the object is returned with its refcounter increased by 1;
 * release it with batadv_orig_node_vlan_free_ref().
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		if (tmp->vid != vid)
			continue;

		/* skip entries that are already being freed: only hand out
		 * the object if a reference could still be taken
		 */
		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		vlan = tmp;

		break;
	}
	rcu_read_unlock();

	return vlan;
}
77
/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 * object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	/* serialise lookup+insert so the same vid cannot be added twice */
	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (vlan)
		goto out;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		goto out;

	/* one reference for the list, one for the caller */
	atomic_set(&vlan->refcount, 2);
	vlan->vid = vid;

	list_add_rcu(&vlan->list, &orig_node->vlan_list);

out:
	spin_unlock_bh(&orig_node->vlan_list_lock);

	return vlan;
}
117
/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
 * the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 *
 * The actual memory release is deferred until an RCU grace period has
 * elapsed (kfree_rcu), keeping concurrent RCU readers safe.
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
{
	if (atomic_dec_and_test(&orig_vlan->refcount))
		kfree_rcu(orig_vlan, rcu);
}
128
Sven Eckelmann56303d32012-06-05 22:31:31 +0200129int batadv_originator_init(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000130{
131 if (bat_priv->orig_hash)
Sven Eckelmann5346c352012-05-05 13:27:28 +0200132 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000133
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200134 bat_priv->orig_hash = batadv_hash_new(1024);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000135
136 if (!bat_priv->orig_hash)
137 goto err;
138
Antonio Quartullidec05072012-11-10 11:00:32 +0100139 batadv_hash_set_lock_class(bat_priv->orig_hash,
140 &batadv_orig_hash_lock_class_key);
141
Antonio Quartulli72414442012-12-25 13:14:37 +0100142 INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
143 queue_delayed_work(batadv_event_workqueue,
144 &bat_priv->orig_work,
145 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
146
Sven Eckelmann5346c352012-05-05 13:27:28 +0200147 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000148
149err:
Sven Eckelmann5346c352012-05-05 13:27:28 +0200150 return -ENOMEM;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000151}
152
/**
 * batadv_neigh_node_free_ref - decrement the neighbour refcounter and free
 * the object (RCU-deferred) once the last reference is dropped
 * @neigh_node: the neigh node to release
 */
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		kfree_rcu(neigh_node, rcu);
}
158
/* increases the refcounter of a found router */
struct batadv_neigh_node *
batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *router;

	rcu_read_lock();
	router = rcu_dereference(orig_node->router);

	/* only return the router if a reference could still be acquired,
	 * i.e. it is not already on its way to being freed
	 */
	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}
174
Antonio Quartulli0538f752013-09-02 12:15:01 +0200175/**
176 * batadv_neigh_node_new - create and init a new neigh_node object
177 * @hard_iface: the interface where the neighbour is connected to
178 * @neigh_addr: the mac address of the neighbour interface
179 * @orig_node: originator object representing the neighbour
180 *
181 * Allocates a new neigh_node object and initialises all the generic fields.
182 * Returns the new object or NULL on failure.
183 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200184struct batadv_neigh_node *
185batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
Antonio Quartulli0538f752013-09-02 12:15:01 +0200186 const uint8_t *neigh_addr,
187 struct batadv_orig_node *orig_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000188{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200189 struct batadv_neigh_node *neigh_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000190
Sven Eckelmann704509b2011-05-14 23:14:54 +0200191 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000192 if (!neigh_node)
Marek Lindner7ae8b282012-03-01 15:35:21 +0800193 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000194
Marek Lindner9591a792010-12-12 21:57:11 +0000195 INIT_HLIST_NODE(&neigh_node->list);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000196
Marek Lindner7ae8b282012-03-01 15:35:21 +0800197 memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
Antonio Quartulli0538f752013-09-02 12:15:01 +0200198 neigh_node->if_incoming = hard_iface;
199 neigh_node->orig_node = orig_node;
200
201 INIT_LIST_HEAD(&neigh_node->bonding_list);
Marek Lindner1605d0d2011-02-18 12:28:11 +0000202
203 /* extra reference for return */
204 atomic_set(&neigh_node->refcount, 2);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000205
Marek Lindner7ae8b282012-03-01 15:35:21 +0800206out:
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000207 return neigh_node;
208}
209
/**
 * batadv_orig_node_free_rcu - release everything hanging off an orig_node
 * @rcu: rcu head embedded in the orig_node being freed
 *
 * Drops the bonding members, the neighbour list, network-coding and
 * fragmentation state, the global translation-table entries and finally
 * the orig_node memory itself.
 */
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all bonding members ... */
	list_for_each_entry_safe(neigh_node, tmp_neigh_node,
				 &orig_node->bond_list, bonding_list) {
		list_del_rcu(&neigh_node->bonding_list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	/* drop all buffered fragments still waiting for this originator */
	batadv_frag_purge_orig(orig_node, NULL);

	batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
				  "originator timed out");

	kfree(orig_node->tt_buff);
	kfree(orig_node->bat_iv.bcast_own);
	kfree(orig_node->bat_iv.bcast_own_sum);
	kfree(orig_node);
}
249
/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 * schedule an rcu callback for freeing it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
	/* the last reference defers the actual release to
	 * batadv_orig_node_free_rcu() after an RCU grace period
	 */
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}
260
/**
 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
 * possibly free it (without rcu callback)
 * @orig_node: the orig node to free
 *
 * NOTE(review): since the object is freed immediately, callers must
 * guarantee no RCU readers can still reach this orig_node - verify at the
 * call sites.
 */
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		batadv_orig_node_free_rcu(&orig_node->rcu);
}
271
/**
 * batadv_originator_free - empty and destroy the originator hash
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Stops the purge worker, drops one reference on every orig_node still in
 * the hash and destroys the hash itself.
 */
void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	/* make sure the periodic purge worker cannot run concurrently with
	 * the teardown below
	 */
	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}
303
/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialise all the generic fields.
 * The new object is not added to the originator list.
 * Returns the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
					      const uint8_t *addr)
{
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;
	int i;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	/* initialise list heads and locks before any of them can be used */
	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->bond_list);
	INIT_LIST_HEAD(&orig_node->vlan_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->tt_initialised = false;
	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	batadv_dat_init_orig_node_addr(orig_node);
	orig_node->router = NULL;
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	/* backdate the reset timestamps so the reset-protection window is
	 * not active for a freshly created originator
	 */
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;
	orig_node->batman_seqno_reset = reset_time;

	atomic_set(&orig_node->bond_candidates, 0);

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		goto free_orig_node;
	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_free_ref(vlan);

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
	}

	return orig_node;
free_orig_node:
	kfree(orig_node);
	return NULL;
}
377
Sven Eckelmann56303d32012-06-05 22:31:31 +0200378static bool
379batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
380 struct batadv_orig_node *orig_node,
381 struct batadv_neigh_node **best_neigh_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000382{
Sasha Levinb67bfe02013-02-27 17:06:00 -0800383 struct hlist_node *node_tmp;
Sven Eckelmann56303d32012-06-05 22:31:31 +0200384 struct batadv_neigh_node *neigh_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000385 bool neigh_purged = false;
Marek Lindner0b0094e2012-03-01 15:35:20 +0800386 unsigned long last_seen;
Sven Eckelmann56303d32012-06-05 22:31:31 +0200387 struct batadv_hard_iface *if_incoming;
Antonio Quartulli0538f752013-09-02 12:15:01 +0200388 uint8_t best_metric = 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000389
390 *best_neigh_node = NULL;
391
Marek Lindnerf987ed62010-12-12 21:57:12 +0000392 spin_lock_bh(&orig_node->neigh_list_lock);
393
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000394 /* for all neighbors towards this originator ... */
Sasha Levinb67bfe02013-02-27 17:06:00 -0800395 hlist_for_each_entry_safe(neigh_node, node_tmp,
Marek Lindner9591a792010-12-12 21:57:11 +0000396 &orig_node->neigh_list, list) {
Sven Eckelmann1eda58b2012-05-12 13:48:58 +0200397 last_seen = neigh_node->last_seen;
398 if_incoming = neigh_node->if_incoming;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000399
Sven Eckelmann42d0b042012-06-03 22:19:17 +0200400 if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
Sven Eckelmanne9a4f292012-06-03 22:19:19 +0200401 (if_incoming->if_status == BATADV_IF_INACTIVE) ||
402 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
403 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
Sven Eckelmanne9a4f292012-06-03 22:19:19 +0200404 if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
405 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
406 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
Sven Eckelmann39c75a52012-06-03 22:19:22 +0200407 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +0200408 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
409 orig_node->orig, neigh_node->addr,
410 if_incoming->net_dev->name);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000411 else
Sven Eckelmann39c75a52012-06-03 22:19:22 +0200412 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +0200413 "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
414 orig_node->orig, neigh_node->addr,
415 jiffies_to_msecs(last_seen));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000416
417 neigh_purged = true;
Marek Lindner9591a792010-12-12 21:57:11 +0000418
Marek Lindnerf987ed62010-12-12 21:57:12 +0000419 hlist_del_rcu(&neigh_node->list);
Sven Eckelmann30d3c512012-05-12 02:09:36 +0200420 batadv_bonding_candidate_del(orig_node, neigh_node);
Sven Eckelmann7d211ef2012-05-12 02:09:34 +0200421 batadv_neigh_node_free_ref(neigh_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000422 } else {
423 if ((!*best_neigh_node) ||
Antonio Quartulli0538f752013-09-02 12:15:01 +0200424 (neigh_node->bat_iv.tq_avg > best_metric)) {
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000425 *best_neigh_node = neigh_node;
Antonio Quartulli0538f752013-09-02 12:15:01 +0200426 best_metric = neigh_node->bat_iv.tq_avg;
427 }
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000428 }
429 }
Marek Lindnerf987ed62010-12-12 21:57:12 +0000430
431 spin_unlock_bh(&orig_node->neigh_list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000432 return neigh_purged;
433}
434
Sven Eckelmann56303d32012-06-05 22:31:31 +0200435static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
436 struct batadv_orig_node *orig_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000437{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200438 struct batadv_neigh_node *best_neigh_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000439
Sven Eckelmann42d0b042012-06-03 22:19:17 +0200440 if (batadv_has_timed_out(orig_node->last_seen,
441 2 * BATADV_PURGE_TIMEOUT)) {
Sven Eckelmann39c75a52012-06-03 22:19:22 +0200442 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +0200443 "Originator timeout: originator %pM, last_seen %u\n",
444 orig_node->orig,
445 jiffies_to_msecs(orig_node->last_seen));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000446 return true;
447 } else {
Sven Eckelmann03fc7f82012-05-12 18:34:00 +0200448 if (batadv_purge_orig_neighbors(bat_priv, orig_node,
449 &best_neigh_node))
Sven Eckelmann30d3c512012-05-12 02:09:36 +0200450 batadv_update_route(bat_priv, orig_node,
451 best_neigh_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000452 }
453
454 return false;
455}
456
/**
 * _batadv_purge_orig - purge stale originators from the orig hash
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Walks the whole originator hash, removes originators which timed out
 * (together with their gateway entries) and purges expired fragment buffer
 * entries of the ones which stay. Afterwards the gateway list is cleaned
 * and a new gateway election is triggered.
 */
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				batadv_gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			/* the originator stays: drop only its expired
			 * fragment buffer entries
			 */
			batadv_frag_purge_orig(orig_node,
					       batadv_frag_check_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_node_purge(bat_priv);
	batadv_gw_election(bat_priv);
}
493
/**
 * batadv_purge_orig - periodic originator purge worker
 * @work: work item embedded in the delayed work of bat_priv
 *
 * Runs one purge pass and re-arms itself to fire again after
 * BATADV_ORIG_WORK_PERIOD milliseconds.
 */
static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}
506
/* trigger a single purge pass immediately, outside the periodic worker */
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}
511
/**
 * batadv_orig_seq_print_text - print the originator table to a seq file
 * @seq: seq file to print on
 * @offset: unused (seq_file callback signature)
 *
 * Prints a header identifying the mesh interface and routing algorithm,
 * then delegates the table dump to the algorithm's bat_orig_print handler
 * if one is provided. Always returns 0.
 */
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	/* header is printed; the primary_if reference is no longer needed */
	batadv_hardif_free_ref(primary_if);

	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq);

	return 0;
}
539
/**
 * batadv_orig_node_add_if - grow the per-interface OGM bookkeeping arrays
 * @orig_node: orig node whose bat_iv arrays are resized
 * @max_if_num: new number of interfaces (arrays grow by one slot)
 *
 * Reallocates bat_iv.bcast_own and bat_iv.bcast_own_sum so they can hold
 * data for max_if_num interfaces, preserving the existing contents.
 * Returns 0 on success or -ENOMEM on allocation failure.
 *
 * NOTE(review): if the second allocation fails, bcast_own has already been
 * replaced while bcast_own_sum keeps its old (smaller) buffer - callers
 * appear to treat -ENOMEM as fatal; confirm at call sites.
 */
static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
				   int max_if_num)
{
	void *data_ptr;
	size_t data_size, old_size;

	data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
	old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
	data_ptr = kmalloc(data_size, GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, orig_node->bat_iv.bcast_own, old_size);
	kfree(orig_node->bat_iv.bcast_own);
	orig_node->bat_iv.bcast_own = data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
	       (max_if_num - 1) * sizeof(uint8_t));
	kfree(orig_node->bat_iv.bcast_own_sum);
	orig_node->bat_iv.bcast_own_sum = data_ptr;

	return 0;
}
567
/**
 * batadv_orig_hash_add_if - resize per-orig_node data when an interface is
 * added
 * @hard_iface: the hard interface which was added
 * @max_if_num: new total number of interfaces
 *
 * Walks the whole originator hash and grows each orig_node's
 * bcast_own(_sum) arrays, which are sized by the interface count.
 * Returns 0 on success or -ENOMEM if any resize failed.
 */
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			/* serialise against concurrent OGM counter updates */
			spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
			ret = batadv_orig_node_add_if(orig_node, max_if_num);
			spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);

			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	/* the error path must still drop the RCU read lock taken above */
	rcu_read_unlock();
	return -ENOMEM;
}
602
Sven Eckelmann56303d32012-06-05 22:31:31 +0200603static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node,
Sven Eckelmann03fc7f82012-05-12 18:34:00 +0200604 int max_if_num, int del_if_num)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000605{
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200606 int chunk_size, if_offset;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000607 void *data_ptr = NULL;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000608
609 /* last interface was removed */
610 if (max_if_num == 0)
611 goto free_bcast_own;
612
Sven Eckelmann42d0b042012-06-03 22:19:17 +0200613 chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000614 data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
Joe Perches320f4222011-08-29 14:17:24 -0700615 if (!data_ptr)
Sven Eckelmann5346c352012-05-05 13:27:28 +0200616 return -ENOMEM;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000617
618 /* copy first part */
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200619 memcpy(data_ptr, orig_node->bat_iv.bcast_own, del_if_num * chunk_size);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000620
621 /* copy second part */
Sven Eckelmann38e3c5f2011-05-14 23:14:49 +0200622 memcpy((char *)data_ptr + del_if_num * chunk_size,
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200623 orig_node->bat_iv.bcast_own + ((del_if_num + 1) * chunk_size),
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000624 (max_if_num - del_if_num) * chunk_size);
625
626free_bcast_own:
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200627 kfree(orig_node->bat_iv.bcast_own);
628 orig_node->bat_iv.bcast_own = data_ptr;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000629
630 if (max_if_num == 0)
631 goto free_own_sum;
632
633 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
Joe Perches320f4222011-08-29 14:17:24 -0700634 if (!data_ptr)
Sven Eckelmann5346c352012-05-05 13:27:28 +0200635 return -ENOMEM;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000636
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200637 memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000638 del_if_num * sizeof(uint8_t));
639
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200640 if_offset = (del_if_num + 1) * sizeof(uint8_t);
Sven Eckelmann38e3c5f2011-05-14 23:14:49 +0200641 memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200642 orig_node->bat_iv.bcast_own_sum + if_offset,
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000643 (max_if_num - del_if_num) * sizeof(uint8_t));
644
645free_own_sum:
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200646 kfree(orig_node->bat_iv.bcast_own_sum);
647 orig_node->bat_iv.bcast_own_sum = data_ptr;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000648
649 return 0;
650}
651
/**
 * batadv_orig_hash_del_if - remove an interface from every originator's
 *  per-interface bookkeeping and renumber the remaining interfaces
 * @hard_iface: the hard interface being removed (its if_num is invalidated)
 * @max_if_num: number of interfaces remaining after the removal
 *
 * Walks the whole originator hash and shrinks each orig_node's
 * bcast_own(_sum) buffers via batadv_orig_node_del_if(), then decrements
 * if_num of every other active interface on the same soft interface that
 * had a higher index than the removed one.
 *
 * Returns 0 on success or -ENOMEM if resizing any orig_node failed.
 */
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			/* ogm_cnt_lock serialises the buffer resize against
			 * concurrent OGM counter updates on this node
			 */
			spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
			ret = batadv_orig_node_del_if(orig_node, max_if_num,
						      hard_iface->if_num);
			spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);

			/* bail out under rcu_read_lock(); the err label
			 * performs the matching rcu_read_unlock()
			 */
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		/* only interfaces attached to the same mesh (soft interface)
		 * share the if_num index space
		 */
		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		/* close the gap left by the removed interface's index */
		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	/* mark the removed interface as unindexed */
	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}