/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"
#include "network-coding.h"
#include "fragmentation.h"
#include "multicast.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/* returns 1 if they are the same originator */
int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return batadv_compare_eth(data1, data2);
}

/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		if (tmp->vid != vid)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		vlan = tmp;

		break;
	}
	rcu_read_unlock();

	return vlan;
}

/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (vlan)
		goto out;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		goto out;

	atomic_set(&vlan->refcount, 2);
	vlan->vid = vid;

	list_add_rcu(&vlan->list, &orig_node->vlan_list);

out:
	spin_unlock_bh(&orig_node->vlan_list_lock);

	return vlan;
}

/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
 *  the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
{
	if (atomic_dec_and_test(&orig_vlan->refcount))
		kfree_rcu(orig_vlan, rcu);
}
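
/* Usage sketch (illustrative only, not part of the original file): the
 * per-originator VLAN objects follow a "new/get, use, free_ref" lifecycle.
 * The helper below is hypothetical and only demonstrates the reference
 * counting discipline, reusing the untagged-LAN call made later in this file.
 */
static void __maybe_unused
batadv_example_orig_vlan_usage(struct batadv_orig_node *orig_node)
{
	struct batadv_orig_node_vlan *vlan;

	/* create (or look up) the entry for the untagged LAN ... */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		return;

	/* ... and drop the reference handed out by _new() once done */
	batadv_orig_node_vlan_free_ref(vlan);
}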

int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}
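
/* Illustrative sketch only (not part of the original file):
 * batadv_originator_init() is paired with batadv_originator_free() on the
 * teardown path of the soft interface.  The helper name is hypothetical and
 * real mesh setup code performs more steps in between.
 */
static void __maybe_unused
batadv_example_originator_setup(struct batadv_priv *bat_priv)
{
	/* allocates the originator hash and schedules the purge worker */
	if (batadv_originator_init(bat_priv) < 0)
		return;

	/* ... mesh runs; on teardown the originator table is released ... */
	batadv_originator_free(bat_priv);
}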

/**
 * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
 * @rcu: rcu pointer of the neigh_ifinfo object
 */
static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);

	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);

	kfree(neigh_ifinfo);
}

/**
 * batadv_neigh_ifinfo_free_ref_now - decrement the refcounter and possibly
 *  free the neigh_ifinfo (without rcu callback)
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
static void
batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
		batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
}

/**
 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
 *  the neigh_ifinfo
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
		call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
}

/**
 * batadv_neigh_node_free_rcu - free the neigh_node
 * @rcu: rcu pointer of the neigh_node
 */
static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);

	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh_node->ifinfo_list, list) {
		batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
	}
	batadv_hardif_free_ref_now(neigh_node->if_incoming);

	kfree(neigh_node);
}

/**
 * batadv_neigh_node_free_ref_now - decrement the neighbor's refcounter
 *  and possibly free it (without rcu callback)
 * @neigh_node: the neighbor node to free
 */
static void
batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		batadv_neigh_node_free_rcu(&neigh_node->rcu);
}

/**
 * batadv_neigh_node_free_ref - decrement the neighbor's refcounter
 *  and possibly free it
 * @neigh_node: the neighbor node to free
 */
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
}

/**
 * batadv_orig_router_get - get the router to the originator for an interface
 * @orig_node: the orig node for the router
 * @if_outgoing: the interface where the payload packet has been received or
 *  the OGM should be sent to
 *
 * Returns the neighbor which should be the router for this orig_node/iface.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_node *
batadv_orig_router_get(struct batadv_orig_node *orig_node,
		       const struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
		if (orig_ifinfo->if_outgoing != if_outgoing)
			continue;

		router = rcu_dereference(orig_ifinfo->router);
		break;
	}

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}
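
/* Usage sketch (illustrative only, not part of the original file): callers
 * fetch the current router for an outgoing interface, use it, and then drop
 * the reference taken on their behalf.  The helper name is hypothetical.
 */
static bool __maybe_unused
batadv_example_orig_has_router(struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *router;

	/* takes a reference on the returned neighbor (or returns NULL) */
	router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
	if (!router)
		return false;

	/* the reference must always be released by the caller */
	batadv_neigh_node_free_ref(router);

	return true;
}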

/**
 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns the requested orig_ifinfo or NULL if not found.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
				 list) {
		if (tmp->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		orig_ifinfo = tmp;
		break;
	}
	rcu_read_unlock();

	return orig_ifinfo;
}

/**
 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the orig_ifinfo object for the if_outgoing
 * interface otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
	unsigned long reset_time;

	spin_lock_bh(&orig_node->neigh_list_lock);

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
	if (orig_ifinfo)
		goto out;

	orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
	if (!orig_ifinfo)
		goto out;

	if (if_outgoing != BATADV_IF_DEFAULT &&
	    !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(orig_ifinfo);
		orig_ifinfo = NULL;
		goto out;
	}

	reset_time = jiffies - 1;
	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_ifinfo->batman_seqno_reset = reset_time;
	orig_ifinfo->if_outgoing = if_outgoing;
	INIT_HLIST_NODE(&orig_ifinfo->list);
	atomic_set(&orig_ifinfo->refcount, 2);
	hlist_add_head_rcu(&orig_ifinfo->list,
			   &orig_node->ifinfo_list);
out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return orig_ifinfo;
}

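/* Usage sketch (illustrative only, not part of the original file): the
 * get/new pair above implements a get-or-create pattern; both return the
 * object with an extra reference the caller has to drop again via
 * batadv_orig_ifinfo_free_ref().  The helper name is hypothetical.
 */
static void __maybe_unused
batadv_example_orig_ifinfo_touch(struct batadv_orig_node *orig_node,
				 struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;

	/* creates the per-interface entry on first use */
	orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
	if (!orig_ifinfo)
		return;

	/* ... update per-interface router / sequence number state here ... */

	batadv_orig_ifinfo_free_ref(orig_ifinfo);
}
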
/**
 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
 * @neigh_node: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * The object is returned with refcounter increased by 1.
 *
 * Returns the requested neigh_ifinfo or NULL if not found.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
				   *tmp_neigh_ifinfo;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
				 list) {
		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
			continue;

		neigh_ifinfo = tmp_neigh_ifinfo;
		break;
	}
	rcu_read_unlock();

	return neigh_ifinfo;
}

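/* Usage sketch (illustrative only, not part of the original file): the
 * per-interface metric data of a neighbor follows the same get-or-create
 * plus free_ref pattern as the orig_ifinfo helpers above.  The helper name
 * is hypothetical.
 */
static void __maybe_unused
batadv_example_neigh_ifinfo_touch(struct batadv_neigh_node *neigh,
				  struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	/* creates the per-interface entry on first use */
	neigh_ifinfo = batadv_neigh_ifinfo_new(neigh, if_outgoing);
	if (!neigh_ifinfo)
		return;

	/* ... a routing algorithm would update its metrics here ... */

	batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
}
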
/**
 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
 * @neigh_node: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the neigh_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	spin_lock_bh(&neigh->ifinfo_lock);

	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
	if (neigh_ifinfo)
		goto out;

	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
	if (!neigh_ifinfo)
		goto out;

	if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(neigh_ifinfo);
		neigh_ifinfo = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_ifinfo->list);
	atomic_set(&neigh_ifinfo->refcount, 2);
	neigh_ifinfo->if_outgoing = if_outgoing;

	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);

out:
	spin_unlock_bh(&neigh->ifinfo_lock);

	return neigh_ifinfo;
}

/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @hard_iface: the interface where the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 * @orig_node: originator object representing the neighbour
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 * Returns the new object or NULL on failure.
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
		      const uint8_t *neigh_addr,
		      struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *neigh_node;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
	spin_lock_init(&neigh_node->ifinfo_lock);

	ether_addr_copy(neigh_node->addr, neigh_addr);
	neigh_node->if_incoming = hard_iface;
	neigh_node->orig_node = orig_node;

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

out:
	return neigh_node;
}

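/* Usage sketch (illustrative only, not part of the original file): a routing
 * algorithm typically allocates the neighbor with batadv_neigh_node_new()
 * and links it into orig_node->neigh_list itself; only the allocate and
 * release steps are shown here.  The helper name and the mac parameter are
 * hypothetical.
 */
static void __maybe_unused
batadv_example_neigh_node_create(struct batadv_hard_iface *hard_iface,
				 const uint8_t *mac,
				 struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *neigh_node;

	neigh_node = batadv_neigh_node_new(hard_iface, mac, orig_node);
	if (!neigh_node)
		return;

	/* drop the extra reference handed out by _new() when done */
	batadv_neigh_node_free_ref(neigh_node);
}
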
/**
 * batadv_neigh_node_get - retrieve a neighbour from the list
 * @orig_node: originator which the neighbour belongs to
 * @hard_iface: the interface where this neighbour is connected to
 * @addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this originator
 * which is connected through the provided hard interface.
 * Returns NULL if the neighbour is not found.
 */
struct batadv_neigh_node *
batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
		      const struct batadv_hard_iface *hard_iface,
		      const uint8_t *addr)
{
	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
		if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
			continue;

		if (tmp_neigh_node->if_incoming != hard_iface)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		res = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	return res;
}

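/* Usage sketch (illustrative only, not part of the original file): look up a
 * neighbor by interface and mac address before deciding whether a new one
 * has to be created.  The helper name is hypothetical.
 */
static bool __maybe_unused
batadv_example_neigh_known(const struct batadv_orig_node *orig_node,
			   const struct batadv_hard_iface *hard_iface,
			   const uint8_t *addr)
{
	struct batadv_neigh_node *neigh_node;

	neigh_node = batadv_neigh_node_get(orig_node, hard_iface, addr);
	if (!neigh_node)
		return false;

	/* release the reference taken by the lookup */
	batadv_neigh_node_free_ref(neigh_node);

	return true;
}
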
/**
 * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
 * @rcu: rcu pointer of the orig_ifinfo object
 */
static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router;

	orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);

	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);

	/* this is the last reference to this object */
	router = rcu_dereference_protected(orig_ifinfo->router, true);
	if (router)
		batadv_neigh_node_free_ref_now(router);
	kfree(orig_ifinfo);
}

/**
 * batadv_orig_ifinfo_free_ref_now - decrement the refcounter and possibly
 *  free the orig_ifinfo (without rcu callback)
 * @orig_ifinfo: the orig_ifinfo object to release
 */
static void
batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
{
	if (atomic_dec_and_test(&orig_ifinfo->refcount))
		batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
}

/**
 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
 *  the orig_ifinfo
 * @orig_ifinfo: the orig_ifinfo object to release
 */
void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
{
	if (atomic_dec_and_test(&orig_ifinfo->refcount))
		call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
}

static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_orig_node *orig_node;
	struct batadv_orig_ifinfo *orig_ifinfo;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref_now(neigh_node);
	}

	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
	}
	spin_unlock_bh(&orig_node->neigh_list_lock);

	batadv_mcast_purge_orig(orig_node);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	batadv_frag_purge_orig(orig_node, NULL);

	batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
				  "originator timed out");

	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

	kfree(orig_node->tt_buff);
	kfree(orig_node);
}

/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 *  schedule an rcu callback for freeing it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
 *  possibly free it (without rcu callback)
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		batadv_orig_node_free_rcu(&orig_node->rcu);
}

void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialises all the generic fields.
 * The new object is not added to the originator list.
 * Returns the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
					      const uint8_t *addr)
{
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;
	int i;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->vlan_list);
	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->bat_priv = bat_priv;
	ether_addr_copy(orig_node->orig, addr);
	batadv_dat_init_orig_node_addr(orig_node);
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;
#ifdef CONFIG_BATMAN_ADV_MCAST
	orig_node->mcast_flags = BATADV_NO_FLAGS;
#endif

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		goto free_orig_node;
	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_free_ref(vlan);

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
	}

	return orig_node;
free_orig_node:
	kfree(orig_node);
	return NULL;
}

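/* Usage sketch (illustrative only, not part of the original file): a routing
 * algorithm creates the originator and is then responsible for inserting it
 * into the originator hash and for dropping the extra reference returned by
 * batadv_orig_node_new().  Hash insertion is omitted here; the helper name
 * is hypothetical.
 */
static void __maybe_unused
batadv_example_orig_node_create(struct batadv_priv *bat_priv,
				const uint8_t *addr)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_orig_node_new(bat_priv, addr);
	if (!orig_node)
		return;

	/* ... normally added to bat_priv->orig_hash at this point ... */

	batadv_orig_node_free_ref(orig_node);
}
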
/**
 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from a neighbor
 * @bat_priv: the bat priv with all the soft interface information
 * @neigh: the neigh node whose ifinfo entries are to be checked
 */
static void
batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
			  struct batadv_neigh_node *neigh)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;

	spin_lock_bh(&neigh->ifinfo_lock);

	/* for all ifinfo objects for this neighbor */
	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh->ifinfo_list, list) {
		if_outgoing = neigh_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
			   neigh->addr, if_outgoing->net_dev->name);

		hlist_del_rcu(&neigh_ifinfo->list);
		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
	}

	spin_unlock_bh(&neigh->ifinfo_lock);
}

/**
 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any ifinfo entry was purged, false otherwise.
 */
static bool
batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;
	bool ifinfo_purged = false;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all ifinfo objects for this originator */
	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		if_outgoing = orig_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "router/ifinfo purge: originator %pM, iface: %s\n",
			   orig_node->orig, if_outgoing->net_dev->name);

		ifinfo_purged = true;

		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
		if (orig_node->last_bonding_candidate == orig_ifinfo) {
			orig_node->last_bonding_candidate = NULL;
			batadv_orig_ifinfo_free_ref(orig_ifinfo);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	return ifinfo_purged;
}

/**
 * batadv_purge_orig_neighbors - purges neighbors from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any neighbor was purged, false otherwise
 */
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_neigh_node_free_ref(neigh_node);
		} else {
			/* only necessary if not the whole neighbor is to be
			 * deleted, but some interface has been removed.
			 */
			batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

/**
 * batadv_find_best_neighbor - finds the best neighbor after purging
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 * @if_outgoing: the interface for which the metric should be compared
 *
 * Returns the current best neighbor, with refcount increased.
 */
static struct batadv_neigh_node *
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
			  struct batadv_orig_node *orig_node,
			  struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_node *best = NULL, *neigh;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;

	rcu_read_lock();
	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
		if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
						best, if_outgoing) <= 0))
			continue;

		if (!atomic_inc_not_zero(&neigh->refcount))
			continue;

		if (best)
			batadv_neigh_node_free_ref(best);

		best = neigh;
	}
	rcu_read_unlock();

	return best;
}

/**
 * batadv_purge_orig_node - purges obsolete information from an orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * This function checks if the orig_node or substructures of it have become
 * obsolete, and purges this information if that's the case.
 *
 * Returns true if the orig_node is to be removed, false otherwise.
 */
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;
	struct batadv_hard_iface *hard_iface;
	bool changed_ifinfo, changed_neigh;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	}
	changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
	changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);

	if (!changed_ifinfo && !changed_neigh)
		return false;

	/* first for NULL ... */
	best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
						    BATADV_IF_DEFAULT);
	batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
			    best_neigh_node);
	if (best_neigh_node)
		batadv_neigh_node_free_ref(best_neigh_node);

	/* ... then for all other interfaces. */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		best_neigh_node = batadv_find_best_neighbor(bat_priv,
							    orig_node,
							    hard_iface);
		batadv_update_route(bat_priv, orig_node, hard_iface,
				    best_neigh_node);
		if (best_neigh_node)
			batadv_neigh_node_free_ref(best_neigh_node);
	}
	rcu_read_unlock();

	return false;
}

static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				batadv_gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			batadv_frag_purge_orig(orig_node,
					       batadv_frag_check_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_node_purge(bat_priv);
	batadv_gw_election(bat_priv);
}

static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}

void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}

int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_free_ref(primary_if);

	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
					       BATADV_IF_DEFAULT);

	return 0;
}

/**
 * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
 *  outgoing interface
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Returns 0
 */
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_hard_iface *hard_iface;
	struct batadv_priv *bat_priv;

	hard_iface = batadv_hardif_get_by_netdev(net_dev);

	if (!hard_iface || !hard_iface->soft_iface) {
		seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
		goto out;
	}

	bat_priv = netdev_priv(hard_iface->soft_iface);
	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		goto out;
	}

	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
		seq_puts(seq, "Interface not active\n");
		goto out;
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
		   hard_iface->net_dev->dev_addr,
		   hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);

out:
	if (hard_iface)
		batadv_hardif_free_ref(hard_iface);
	return 0;
}

int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_add_if)
				ret = bao->bat_orig_add_if(orig_node,
							   max_if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_del_if)
				ret = bao->bat_orig_del_if(orig_node,
							   max_if_num,
							   hard_iface->if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}
1177}