blob: 1a4725f5267d68945f06e189c1f8140c6a2b9951 [file] [log] [blame]
/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
17
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000018#include "main.h"
Antonio Quartulli785ea112011-11-23 11:35:44 +010019#include "distributed-arp-table.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000020#include "originator.h"
21#include "hash.h"
22#include "translation-table.h"
23#include "routing.h"
24#include "gateway_client.h"
25#include "hard-interface.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000026#include "soft-interface.h"
Simon Wunderlich23721382012-01-22 20:00:19 +010027#include "bridge_loop_avoidance.h"
Martin Hundebølld56b1702013-01-25 11:12:39 +010028#include "network-coding.h"
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +020029#include "fragmentation.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000030
/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

/* forward declaration: periodic originator purge worker */
static void batadv_purge_orig(struct work_struct *work);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000035
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020036/* returns 1 if they are the same originator */
Antonio Quartullibbad0a52013-09-02 12:15:02 +020037int batadv_compare_orig(const struct hlist_node *node, const void *data2)
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020038{
Sven Eckelmann56303d32012-06-05 22:31:31 +020039 const void *data1 = container_of(node, struct batadv_orig_node,
40 hash_entry);
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020041
dingtianhong323813e2013-12-26 19:40:39 +080042 return batadv_compare_eth(data1, data2);
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020043}
44
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020045/**
46 * batadv_orig_node_vlan_get - get an orig_node_vlan object
47 * @orig_node: the originator serving the VLAN
48 * @vid: the VLAN identifier
49 *
50 * Returns the vlan object identified by vid and belonging to orig_node or NULL
51 * if it does not exist.
52 */
53struct batadv_orig_node_vlan *
54batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
55 unsigned short vid)
56{
57 struct batadv_orig_node_vlan *vlan = NULL, *tmp;
58
59 rcu_read_lock();
60 list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
61 if (tmp->vid != vid)
62 continue;
63
64 if (!atomic_inc_not_zero(&tmp->refcount))
65 continue;
66
67 vlan = tmp;
68
69 break;
70 }
71 rcu_read_unlock();
72
73 return vlan;
74}
75
76/**
77 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
78 * object
79 * @orig_node: the originator serving the VLAN
80 * @vid: the VLAN identifier
81 *
82 * Returns NULL in case of failure or the vlan object identified by vid and
83 * belonging to orig_node otherwise. The object is created and added to the list
84 * if it does not exist.
85 *
86 * The object is returned with refcounter increased by 1.
87 */
88struct batadv_orig_node_vlan *
89batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
90 unsigned short vid)
91{
92 struct batadv_orig_node_vlan *vlan;
93
94 spin_lock_bh(&orig_node->vlan_list_lock);
95
96 /* first look if an object for this vid already exists */
97 vlan = batadv_orig_node_vlan_get(orig_node, vid);
98 if (vlan)
99 goto out;
100
101 vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
102 if (!vlan)
103 goto out;
104
105 atomic_set(&vlan->refcount, 2);
106 vlan->vid = vid;
107
108 list_add_rcu(&vlan->list, &orig_node->vlan_list);
109
110out:
111 spin_unlock_bh(&orig_node->vlan_list_lock);
112
113 return vlan;
114}
115
116/**
117 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
118 * the originator-vlan object
119 * @orig_vlan: the originator-vlan object to release
120 */
121void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
122{
123 if (atomic_dec_and_test(&orig_vlan->refcount))
124 kfree_rcu(orig_vlan, rcu);
125}
126
Sven Eckelmann56303d32012-06-05 22:31:31 +0200127int batadv_originator_init(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000128{
129 if (bat_priv->orig_hash)
Sven Eckelmann5346c352012-05-05 13:27:28 +0200130 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000131
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200132 bat_priv->orig_hash = batadv_hash_new(1024);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000133
134 if (!bat_priv->orig_hash)
135 goto err;
136
Antonio Quartullidec05072012-11-10 11:00:32 +0100137 batadv_hash_set_lock_class(bat_priv->orig_hash,
138 &batadv_orig_hash_lock_class_key);
139
Antonio Quartulli72414442012-12-25 13:14:37 +0100140 INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
141 queue_delayed_work(batadv_event_workqueue,
142 &bat_priv->orig_work,
143 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
144
Sven Eckelmann5346c352012-05-05 13:27:28 +0200145 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000146
147err:
Sven Eckelmann5346c352012-05-05 13:27:28 +0200148 return -ENOMEM;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000149}
150
Simon Wunderlich89652332013-11-13 19:14:46 +0100151/**
152 * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
153 * @rcu: rcu pointer of the neigh_ifinfo object
154 */
155static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
156{
157 struct batadv_neigh_ifinfo *neigh_ifinfo;
158
159 neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);
160
161 if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
162 batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);
163
164 kfree(neigh_ifinfo);
165}
166
167/**
168 * batadv_neigh_ifinfo_free_now - decrement the refcounter and possibly free
169 * the neigh_ifinfo (without rcu callback)
170 * @neigh_ifinfo: the neigh_ifinfo object to release
171 */
172static void
173batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
174{
175 if (atomic_dec_and_test(&neigh_ifinfo->refcount))
176 batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
177}
178
179/**
180 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
181 * the neigh_ifinfo
182 * @neigh_ifinfo: the neigh_ifinfo object to release
183 */
184void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
185{
186 if (atomic_dec_and_test(&neigh_ifinfo->refcount))
187 call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
188}
189
190/**
191 * batadv_neigh_node_free_rcu - free the neigh_node
192 * @rcu: rcu pointer of the neigh_node
193 */
194static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
195{
196 struct hlist_node *node_tmp;
197 struct batadv_neigh_node *neigh_node;
198 struct batadv_neigh_ifinfo *neigh_ifinfo;
199
200 neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
201
202 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
203 &neigh_node->ifinfo_list, list) {
204 batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
205 }
206 batadv_hardif_free_ref_now(neigh_node->if_incoming);
207
208 kfree(neigh_node);
209}
210
211/**
212 * batadv_neigh_node_free_ref_now - decrement the neighbors refcounter
213 * and possibly free it (without rcu callback)
214 * @neigh_node: neigh neighbor to free
215 */
216static void
217batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
218{
219 if (atomic_dec_and_test(&neigh_node->refcount))
220 batadv_neigh_node_free_rcu(&neigh_node->rcu);
221}
222
223/**
224 * batadv_neigh_node_free_ref - decrement the neighbors refcounter
225 * and possibly free it
226 * @neigh_node: neigh neighbor to free
227 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200228void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
Simon Wunderlicha4c135c2011-01-19 20:01:43 +0000229{
Marek Lindner44524fc2011-02-10 14:33:53 +0000230 if (atomic_dec_and_test(&neigh_node->refcount))
Simon Wunderlich89652332013-11-13 19:14:46 +0100231 call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
Simon Wunderlicha4c135c2011-01-19 20:01:43 +0000232}
233
Linus LĂĽssinge1a5382f2011-03-14 22:43:37 +0000234/* increases the refcounter of a found router */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200235struct batadv_neigh_node *
236batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
Linus LĂĽssinge1a5382f2011-03-14 22:43:37 +0000237{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200238 struct batadv_neigh_node *router;
Linus LĂĽssinge1a5382f2011-03-14 22:43:37 +0000239
240 rcu_read_lock();
241 router = rcu_dereference(orig_node->router);
242
243 if (router && !atomic_inc_not_zero(&router->refcount))
244 router = NULL;
245
246 rcu_read_unlock();
247 return router;
248}
249
Antonio Quartulli0538f752013-09-02 12:15:01 +0200250/**
Simon Wunderlich89652332013-11-13 19:14:46 +0100251 * batadv_neigh_ifinfo_get - find the ifinfo from an neigh_node
252 * @neigh_node: the neigh node to be queried
253 * @if_outgoing: the interface for which the ifinfo should be acquired
254 *
255 * The object is returned with refcounter increased by 1.
256 *
257 * Returns the requested neigh_ifinfo or NULL if not found
258 */
259struct batadv_neigh_ifinfo *
260batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
261 struct batadv_hard_iface *if_outgoing)
262{
263 struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
264 *tmp_neigh_ifinfo;
265
266 rcu_read_lock();
267 hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
268 list) {
269 if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
270 continue;
271
272 if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
273 continue;
274
275 neigh_ifinfo = tmp_neigh_ifinfo;
276 break;
277 }
278 rcu_read_unlock();
279
280 return neigh_ifinfo;
281}
282
283/**
284 * batadv_neigh_ifinfo_new - search and possibly create an neigh_ifinfo object
285 * @neigh_node: the neigh node to be queried
286 * @if_outgoing: the interface for which the ifinfo should be acquired
287 *
288 * Returns NULL in case of failure or the neigh_ifinfo object for the
289 * if_outgoing interface otherwise. The object is created and added to the list
290 * if it does not exist.
291 *
292 * The object is returned with refcounter increased by 1.
293 */
294struct batadv_neigh_ifinfo *
295batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
296 struct batadv_hard_iface *if_outgoing)
297{
298 struct batadv_neigh_ifinfo *neigh_ifinfo;
299
300 spin_lock_bh(&neigh->ifinfo_lock);
301
302 neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
303 if (neigh_ifinfo)
304 goto out;
305
306 neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
307 if (!neigh_ifinfo)
308 goto out;
309
310 if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
311 kfree(neigh_ifinfo);
312 neigh_ifinfo = NULL;
313 goto out;
314 }
315
316 INIT_HLIST_NODE(&neigh_ifinfo->list);
317 atomic_set(&neigh_ifinfo->refcount, 2);
318 neigh_ifinfo->if_outgoing = if_outgoing;
319
320 hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);
321
322out:
323 spin_unlock_bh(&neigh->ifinfo_lock);
324
325 return neigh_ifinfo;
326}
327
328/**
Antonio Quartulli0538f752013-09-02 12:15:01 +0200329 * batadv_neigh_node_new - create and init a new neigh_node object
330 * @hard_iface: the interface where the neighbour is connected to
331 * @neigh_addr: the mac address of the neighbour interface
332 * @orig_node: originator object representing the neighbour
333 *
334 * Allocates a new neigh_node object and initialises all the generic fields.
335 * Returns the new object or NULL on failure.
336 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200337struct batadv_neigh_node *
338batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
Antonio Quartulli0538f752013-09-02 12:15:01 +0200339 const uint8_t *neigh_addr,
340 struct batadv_orig_node *orig_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000341{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200342 struct batadv_neigh_node *neigh_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000343
Sven Eckelmann704509b2011-05-14 23:14:54 +0200344 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000345 if (!neigh_node)
Marek Lindner7ae8b282012-03-01 15:35:21 +0800346 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000347
Marek Lindner9591a792010-12-12 21:57:11 +0000348 INIT_HLIST_NODE(&neigh_node->list);
Simon Wunderlich89652332013-11-13 19:14:46 +0100349 INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
350 spin_lock_init(&neigh_node->ifinfo_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000351
Marek Lindner7ae8b282012-03-01 15:35:21 +0800352 memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
Antonio Quartulli0538f752013-09-02 12:15:01 +0200353 neigh_node->if_incoming = hard_iface;
354 neigh_node->orig_node = orig_node;
355
Marek Lindner1605d0d2011-02-18 12:28:11 +0000356 /* extra reference for return */
357 atomic_set(&neigh_node->refcount, 2);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000358
Marek Lindner7ae8b282012-03-01 15:35:21 +0800359out:
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000360 return neigh_node;
361}
362
/**
 * batadv_orig_node_free_rcu - free an orig_node after the RCU grace period
 * @rcu: rcu pointer of the orig_node to free
 *
 * Releases everything hanging off the originator (neighbor list, network
 * coding state, fragment buffers, global TT entries, algorithm private data)
 * before freeing the object itself.
 */
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref_now(neigh_node);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	/* NULL check function: drop all pending fragment chains */
	batadv_frag_purge_orig(orig_node, NULL);

	batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
				  "originator timed out");

	/* let the routing algorithm release its private per-orig data */
	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

	kfree(orig_node->tt_buff);
	kfree(orig_node);
}
396
Linus LĂĽssing72822222013-04-15 21:43:29 +0800397/**
398 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
399 * schedule an rcu callback for freeing it
400 * @orig_node: the orig node to free
401 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200402void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000403{
404 if (atomic_dec_and_test(&orig_node->refcount))
Sven Eckelmann03fc7f82012-05-12 18:34:00 +0200405 call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000406}
407
Linus LĂĽssing72822222013-04-15 21:43:29 +0800408/**
409 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
410 * possibly free it (without rcu callback)
411 * @orig_node: the orig node to free
412 */
413void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
414{
415 if (atomic_dec_and_test(&orig_node->refcount))
416 batadv_orig_node_free_rcu(&orig_node->rcu);
417}
418
Sven Eckelmann56303d32012-06-05 22:31:31 +0200419void batadv_originator_free(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000420{
Sven Eckelmann5bf74e92012-06-05 22:31:28 +0200421 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sasha Levinb67bfe02013-02-27 17:06:00 -0800422 struct hlist_node *node_tmp;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000423 struct hlist_head *head;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000424 spinlock_t *list_lock; /* spinlock to protect write access */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200425 struct batadv_orig_node *orig_node;
Antonio Quartullic90681b2011-10-05 17:05:25 +0200426 uint32_t i;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000427
428 if (!hash)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000429 return;
430
431 cancel_delayed_work_sync(&bat_priv->orig_work);
432
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000433 bat_priv->orig_hash = NULL;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000434
435 for (i = 0; i < hash->size; i++) {
436 head = &hash->table[i];
437 list_lock = &hash->list_locks[i];
438
439 spin_lock_bh(list_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -0800440 hlist_for_each_entry_safe(orig_node, node_tmp,
Marek Lindner7aadf882011-02-18 12:28:09 +0000441 head, hash_entry) {
Sasha Levinb67bfe02013-02-27 17:06:00 -0800442 hlist_del_rcu(&orig_node->hash_entry);
Sven Eckelmann7d211ef2012-05-12 02:09:34 +0200443 batadv_orig_node_free_ref(orig_node);
Marek Lindner16b1aba2011-01-19 20:01:42 +0000444 }
445 spin_unlock_bh(list_lock);
446 }
447
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200448 batadv_hash_destroy(hash);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000449}
450
/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialise all the generic fields.
 * The new object is not added to the originator list.
 * Returns the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
					      const uint8_t *addr)
{
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;
	int i;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->vlan_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->tt_initialised = false;
	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	batadv_dat_init_orig_node_addr(orig_node);
	orig_node->router = NULL;
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	/* place the reset timestamps far enough in the past that the
	 * protection window does not trigger right after creation
	 */
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;
	orig_node->batman_seqno_reset = reset_time;

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		goto free_orig_node;
	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_free_ref(vlan);

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
	}

	return orig_node;
free_orig_node:
	kfree(orig_node);
	return NULL;
}
521
Simon Wunderlich89652332013-11-13 19:14:46 +0100522/**
523 * batadv_purge_orig_neighbors - purges neighbors from originator
524 * @bat_priv: the bat priv with all the soft interface information
525 * @orig_node: orig node which is to be checked
526 *
527 * Returns true if any neighbor was purged, false otherwise
528 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200529static bool
530batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
Simon Wunderlich89652332013-11-13 19:14:46 +0100531 struct batadv_orig_node *orig_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000532{
Sasha Levinb67bfe02013-02-27 17:06:00 -0800533 struct hlist_node *node_tmp;
Sven Eckelmann56303d32012-06-05 22:31:31 +0200534 struct batadv_neigh_node *neigh_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000535 bool neigh_purged = false;
Marek Lindner0b0094e2012-03-01 15:35:20 +0800536 unsigned long last_seen;
Sven Eckelmann56303d32012-06-05 22:31:31 +0200537 struct batadv_hard_iface *if_incoming;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000538
Marek Lindnerf987ed62010-12-12 21:57:12 +0000539 spin_lock_bh(&orig_node->neigh_list_lock);
540
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000541 /* for all neighbors towards this originator ... */
Sasha Levinb67bfe02013-02-27 17:06:00 -0800542 hlist_for_each_entry_safe(neigh_node, node_tmp,
Marek Lindner9591a792010-12-12 21:57:11 +0000543 &orig_node->neigh_list, list) {
Sven Eckelmann1eda58b2012-05-12 13:48:58 +0200544 last_seen = neigh_node->last_seen;
545 if_incoming = neigh_node->if_incoming;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000546
Sven Eckelmann42d0b042012-06-03 22:19:17 +0200547 if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
Sven Eckelmanne9a4f292012-06-03 22:19:19 +0200548 (if_incoming->if_status == BATADV_IF_INACTIVE) ||
549 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
550 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
Sven Eckelmanne9a4f292012-06-03 22:19:19 +0200551 if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
552 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
553 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
Sven Eckelmann39c75a52012-06-03 22:19:22 +0200554 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +0200555 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
556 orig_node->orig, neigh_node->addr,
557 if_incoming->net_dev->name);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000558 else
Sven Eckelmann39c75a52012-06-03 22:19:22 +0200559 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +0200560 "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
561 orig_node->orig, neigh_node->addr,
562 jiffies_to_msecs(last_seen));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000563
564 neigh_purged = true;
Marek Lindner9591a792010-12-12 21:57:11 +0000565
Marek Lindnerf987ed62010-12-12 21:57:12 +0000566 hlist_del_rcu(&neigh_node->list);
Sven Eckelmann7d211ef2012-05-12 02:09:34 +0200567 batadv_neigh_node_free_ref(neigh_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000568 }
569 }
Marek Lindnerf987ed62010-12-12 21:57:12 +0000570
571 spin_unlock_bh(&orig_node->neigh_list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000572 return neigh_purged;
573}
574
Simon Wunderlich89652332013-11-13 19:14:46 +0100575/**
576 * batadv_find_best_neighbor - finds the best neighbor after purging
577 * @bat_priv: the bat priv with all the soft interface information
578 * @orig_node: orig node which is to be checked
579 * @if_outgoing: the interface for which the metric should be compared
580 *
581 * Returns the current best neighbor, with refcount increased.
582 */
583static struct batadv_neigh_node *
584batadv_find_best_neighbor(struct batadv_priv *bat_priv,
585 struct batadv_orig_node *orig_node,
586 struct batadv_hard_iface *if_outgoing)
587{
588 struct batadv_neigh_node *best = NULL, *neigh;
589 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
590
591 rcu_read_lock();
592 hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
593 if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
594 best, if_outgoing) <= 0))
595 continue;
596
597 if (!atomic_inc_not_zero(&neigh->refcount))
598 continue;
599
600 if (best)
601 batadv_neigh_node_free_ref(best);
602
603 best = neigh;
604 }
605 rcu_read_unlock();
606
607 return best;
608}
609
Sven Eckelmann56303d32012-06-05 22:31:31 +0200610static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
611 struct batadv_orig_node *orig_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000612{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200613 struct batadv_neigh_node *best_neigh_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000614
Sven Eckelmann42d0b042012-06-03 22:19:17 +0200615 if (batadv_has_timed_out(orig_node->last_seen,
616 2 * BATADV_PURGE_TIMEOUT)) {
Sven Eckelmann39c75a52012-06-03 22:19:22 +0200617 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +0200618 "Originator timeout: originator %pM, last_seen %u\n",
619 orig_node->orig,
620 jiffies_to_msecs(orig_node->last_seen));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000621 return true;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000622 }
Simon Wunderlich89652332013-11-13 19:14:46 +0100623 if (!batadv_purge_orig_neighbors(bat_priv, orig_node))
624 return false;
625
626 best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
627 BATADV_IF_DEFAULT);
628 batadv_update_route(bat_priv, orig_node, best_neigh_node);
629 if (best_neigh_node)
630 batadv_neigh_node_free_ref(best_neigh_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000631
632 return false;
633}
634
/**
 * _batadv_purge_orig - walk the originator hash and purge stale entries
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Removes timed out originators (and their gateway entries) under the
 * per-bucket lock; surviving originators get their expired fragment
 * buffers purged.
 */
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				batadv_gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				/* drop the reference held by the hash */
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			/* batadv_frag_check_entry decides per fragment
			 * chain whether it expired
			 */
			batadv_frag_purge_orig(orig_node,
					       batadv_frag_check_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_node_purge(bat_priv);
	batadv_gw_election(bat_priv);
}
671
Sven Eckelmann03fc7f82012-05-12 18:34:00 +0200672static void batadv_purge_orig(struct work_struct *work)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000673{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200674 struct delayed_work *delayed_work;
675 struct batadv_priv *bat_priv;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000676
Sven Eckelmann56303d32012-06-05 22:31:31 +0200677 delayed_work = container_of(work, struct delayed_work, work);
678 bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
Sven Eckelmann03fc7f82012-05-12 18:34:00 +0200679 _batadv_purge_orig(bat_priv);
Antonio Quartulli72414442012-12-25 13:14:37 +0100680 queue_delayed_work(batadv_event_workqueue,
681 &bat_priv->orig_work,
682 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000683}
684
/* trigger an immediate purge run on the originator table */
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}
689
Sven Eckelmann7d211ef2012-05-12 02:09:34 +0200690int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000691{
692 struct net_device *net_dev = (struct net_device *)seq->private;
Sven Eckelmann56303d32012-06-05 22:31:31 +0200693 struct batadv_priv *bat_priv = netdev_priv(net_dev);
Sven Eckelmann56303d32012-06-05 22:31:31 +0200694 struct batadv_hard_iface *primary_if;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000695
Marek Lindner30da63a2012-08-03 17:15:46 +0200696 primary_if = batadv_seq_print_text_primary_if_get(seq);
697 if (!primary_if)
Antonio Quartulli737a2a222013-09-02 12:15:03 +0200698 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000699
Antonio Quartulli737a2a222013-09-02 12:15:03 +0200700 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
Sven Eckelmann42d0b042012-06-03 22:19:17 +0200701 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
Antonio Quartulli737a2a222013-09-02 12:15:03 +0200702 primary_if->net_dev->dev_addr, net_dev->name,
703 bat_priv->bat_algo_ops->name);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000704
Antonio Quartulli737a2a222013-09-02 12:15:03 +0200705 batadv_hardif_free_ref(primary_if);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000706
Antonio Quartulli737a2a222013-09-02 12:15:03 +0200707 if (!bat_priv->bat_algo_ops->bat_orig_print) {
708 seq_puts(seq,
709 "No printing function for this routing protocol\n");
710 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000711 }
712
Antonio Quartulli737a2a222013-09-02 12:15:03 +0200713 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000714
Marek Lindner30da63a2012-08-03 17:15:46 +0200715 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000716}
717
Sven Eckelmann56303d32012-06-05 22:31:31 +0200718int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
719 int max_if_num)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000720{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200721 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
Antonio Quartullid0015fd2013-09-03 11:10:23 +0200722 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
Sven Eckelmann5bf74e92012-06-05 22:31:28 +0200723 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000724 struct hlist_head *head;
Sven Eckelmann56303d32012-06-05 22:31:31 +0200725 struct batadv_orig_node *orig_node;
Antonio Quartullic90681b2011-10-05 17:05:25 +0200726 uint32_t i;
727 int ret;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000728
729 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +0200730 * if_num
731 */
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000732 for (i = 0; i < hash->size; i++) {
733 head = &hash->table[i];
734
Marek Lindnerfb778ea2011-01-19 20:01:40 +0000735 rcu_read_lock();
Sasha Levinb67bfe02013-02-27 17:06:00 -0800736 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
Antonio Quartullid0015fd2013-09-03 11:10:23 +0200737 ret = 0;
738 if (bao->bat_orig_add_if)
739 ret = bao->bat_orig_add_if(orig_node,
740 max_if_num);
Sven Eckelmann5346c352012-05-05 13:27:28 +0200741 if (ret == -ENOMEM)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000742 goto err;
743 }
Marek Lindnerfb778ea2011-01-19 20:01:40 +0000744 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000745 }
746
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000747 return 0;
748
749err:
Marek Lindnerfb778ea2011-01-19 20:01:40 +0000750 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000751 return -ENOMEM;
752}
753
/**
 * batadv_orig_hash_del_if - remove a hard interface from the orig tables
 * @hard_iface: the hard interface being removed
 * @max_if_num: the remaining number of interfaces after the removal
 *
 * Shrinks the per-interface data of every originator via the routing
 * algorithm's bat_orig_del_if hook, then renumbers the surviving
 * interfaces so if_num stays a dense 0..max_if_num-1 index.
 *
 * Returns 0 on success or -ENOMEM if the hook fails to reallocate.
 */
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			/* ret must be reset per node: a missing hook means
			 * "nothing to do", not an inherited error
			 */
			ret = 0;
			if (bao->bat_orig_del_if)
				ret = bao->bat_orig_del_if(orig_node,
							   max_if_num,
							   hard_iface->if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		/* only interfaces on the same mesh need renumbering */
		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		/* close the index gap left by the removed interface */
		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	/* the removed interface no longer owns a valid index */
	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}