blob: f8317c1db427412db45f9c737391d2f21892b7cf [file] [log] [blame]
/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
17
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000018#include "originator.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020019#include "main.h"
20
21#include <linux/errno.h>
22#include <linux/etherdevice.h>
23#include <linux/fs.h>
24#include <linux/jiffies.h>
25#include <linux/kernel.h>
26#include <linux/list.h>
27#include <linux/lockdep.h>
28#include <linux/netdevice.h>
Marek Lindnerd0fa4f32015-06-22 00:30:22 +080029#include <linux/rculist.h>
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020030#include <linux/seq_file.h>
31#include <linux/slab.h>
32#include <linux/spinlock.h>
33#include <linux/workqueue.h>
34
35#include "distributed-arp-table.h"
36#include "fragmentation.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000037#include "gateway_client.h"
38#include "hard-interface.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020039#include "hash.h"
Linus Lüssing60432d72014-02-15 17:47:51 +010040#include "multicast.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020041#include "network-coding.h"
42#include "routing.h"
43#include "translation-table.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000044
Antonio Quartullidec05072012-11-10 11:00:32 +010045/* hash class keys */
46static struct lock_class_key batadv_orig_hash_lock_class_key;
47
Sven Eckelmann03fc7f82012-05-12 18:34:00 +020048static void batadv_purge_orig(struct work_struct *work);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000049
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020050/* returns 1 if they are the same originator */
Antonio Quartullibbad0a52013-09-02 12:15:02 +020051int batadv_compare_orig(const struct hlist_node *node, const void *data2)
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020052{
Sven Eckelmann56303d32012-06-05 22:31:31 +020053 const void *data1 = container_of(node, struct batadv_orig_node,
54 hash_entry);
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020055
dingtianhong323813e2013-12-26 19:40:39 +080056 return batadv_compare_eth(data1, data2);
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020057}
58
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020059/**
60 * batadv_orig_node_vlan_get - get an orig_node_vlan object
61 * @orig_node: the originator serving the VLAN
62 * @vid: the VLAN identifier
63 *
64 * Returns the vlan object identified by vid and belonging to orig_node or NULL
65 * if it does not exist.
66 */
67struct batadv_orig_node_vlan *
68batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
69 unsigned short vid)
70{
71 struct batadv_orig_node_vlan *vlan = NULL, *tmp;
72
73 rcu_read_lock();
Marek Lindnerd0fa4f32015-06-22 00:30:22 +080074 hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020075 if (tmp->vid != vid)
76 continue;
77
78 if (!atomic_inc_not_zero(&tmp->refcount))
79 continue;
80
81 vlan = tmp;
82
83 break;
84 }
85 rcu_read_unlock();
86
87 return vlan;
88}
89
90/**
91 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
92 * object
93 * @orig_node: the originator serving the VLAN
94 * @vid: the VLAN identifier
95 *
96 * Returns NULL in case of failure or the vlan object identified by vid and
97 * belonging to orig_node otherwise. The object is created and added to the list
98 * if it does not exist.
99 *
100 * The object is returned with refcounter increased by 1.
101 */
102struct batadv_orig_node_vlan *
103batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
104 unsigned short vid)
105{
106 struct batadv_orig_node_vlan *vlan;
107
108 spin_lock_bh(&orig_node->vlan_list_lock);
109
110 /* first look if an object for this vid already exists */
111 vlan = batadv_orig_node_vlan_get(orig_node, vid);
112 if (vlan)
113 goto out;
114
115 vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
116 if (!vlan)
117 goto out;
118
119 atomic_set(&vlan->refcount, 2);
120 vlan->vid = vid;
121
Marek Lindnerd0fa4f32015-06-22 00:30:22 +0800122 hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200123
124out:
125 spin_unlock_bh(&orig_node->vlan_list_lock);
126
127 return vlan;
128}
129
130/**
131 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
132 * the originator-vlan object
133 * @orig_vlan: the originator-vlan object to release
134 */
135void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
136{
137 if (atomic_dec_and_test(&orig_vlan->refcount))
138 kfree_rcu(orig_vlan, rcu);
139}
140
Sven Eckelmann56303d32012-06-05 22:31:31 +0200141int batadv_originator_init(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000142{
143 if (bat_priv->orig_hash)
Sven Eckelmann5346c352012-05-05 13:27:28 +0200144 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000145
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200146 bat_priv->orig_hash = batadv_hash_new(1024);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000147
148 if (!bat_priv->orig_hash)
149 goto err;
150
Antonio Quartullidec05072012-11-10 11:00:32 +0100151 batadv_hash_set_lock_class(bat_priv->orig_hash,
152 &batadv_orig_hash_lock_class_key);
153
Antonio Quartulli72414442012-12-25 13:14:37 +0100154 INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
155 queue_delayed_work(batadv_event_workqueue,
156 &bat_priv->orig_work,
157 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
158
Sven Eckelmann5346c352012-05-05 13:27:28 +0200159 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000160
161err:
Sven Eckelmann5346c352012-05-05 13:27:28 +0200162 return -ENOMEM;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000163}
164
Simon Wunderlich89652332013-11-13 19:14:46 +0100165/**
166 * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
167 * @rcu: rcu pointer of the neigh_ifinfo object
168 */
169static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
170{
171 struct batadv_neigh_ifinfo *neigh_ifinfo;
172
173 neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);
174
175 if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
176 batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);
177
178 kfree(neigh_ifinfo);
179}
180
181/**
182 * batadv_neigh_ifinfo_free_now - decrement the refcounter and possibly free
183 * the neigh_ifinfo (without rcu callback)
184 * @neigh_ifinfo: the neigh_ifinfo object to release
185 */
186static void
187batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
188{
189 if (atomic_dec_and_test(&neigh_ifinfo->refcount))
190 batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
191}
192
193/**
194 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
195 * the neigh_ifinfo
196 * @neigh_ifinfo: the neigh_ifinfo object to release
197 */
198void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
199{
200 if (atomic_dec_and_test(&neigh_ifinfo->refcount))
201 call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
202}
203
204/**
205 * batadv_neigh_node_free_rcu - free the neigh_node
206 * @rcu: rcu pointer of the neigh_node
207 */
208static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
209{
210 struct hlist_node *node_tmp;
211 struct batadv_neigh_node *neigh_node;
212 struct batadv_neigh_ifinfo *neigh_ifinfo;
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800213 struct batadv_algo_ops *bao;
Simon Wunderlich89652332013-11-13 19:14:46 +0100214
215 neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800216 bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
Simon Wunderlich89652332013-11-13 19:14:46 +0100217
218 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
219 &neigh_node->ifinfo_list, list) {
220 batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
221 }
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800222
223 if (bao->bat_neigh_free)
224 bao->bat_neigh_free(neigh_node);
225
Simon Wunderlich89652332013-11-13 19:14:46 +0100226 batadv_hardif_free_ref_now(neigh_node->if_incoming);
227
228 kfree(neigh_node);
229}
230
231/**
232 * batadv_neigh_node_free_ref_now - decrement the neighbors refcounter
233 * and possibly free it (without rcu callback)
234 * @neigh_node: neigh neighbor to free
235 */
236static void
237batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
238{
239 if (atomic_dec_and_test(&neigh_node->refcount))
240 batadv_neigh_node_free_rcu(&neigh_node->rcu);
241}
242
243/**
244 * batadv_neigh_node_free_ref - decrement the neighbors refcounter
245 * and possibly free it
246 * @neigh_node: neigh neighbor to free
247 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200248void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
Simon Wunderlicha4c135c2011-01-19 20:01:43 +0000249{
Marek Lindner44524fc2011-02-10 14:33:53 +0000250 if (atomic_dec_and_test(&neigh_node->refcount))
Simon Wunderlich89652332013-11-13 19:14:46 +0100251 call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
Simon Wunderlicha4c135c2011-01-19 20:01:43 +0000252}
253
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100254/**
255 * batadv_orig_node_get_router - router to the originator depending on iface
256 * @orig_node: the orig node for the router
257 * @if_outgoing: the interface where the payload packet has been received or
258 * the OGM should be sent to
259 *
260 * Returns the neighbor which should be router for this orig_node/iface.
261 *
262 * The object is returned with refcounter increased by 1.
263 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200264struct batadv_neigh_node *
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100265batadv_orig_router_get(struct batadv_orig_node *orig_node,
266 const struct batadv_hard_iface *if_outgoing)
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000267{
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100268 struct batadv_orig_ifinfo *orig_ifinfo;
269 struct batadv_neigh_node *router = NULL;
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000270
271 rcu_read_lock();
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100272 hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
273 if (orig_ifinfo->if_outgoing != if_outgoing)
274 continue;
275
276 router = rcu_dereference(orig_ifinfo->router);
277 break;
278 }
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000279
280 if (router && !atomic_inc_not_zero(&router->refcount))
281 router = NULL;
282
283 rcu_read_unlock();
284 return router;
285}
286
Antonio Quartulli0538f752013-09-02 12:15:01 +0200287/**
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100288 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
289 * @orig_node: the orig node to be queried
290 * @if_outgoing: the interface for which the ifinfo should be acquired
291 *
292 * Returns the requested orig_ifinfo or NULL if not found.
293 *
294 * The object is returned with refcounter increased by 1.
295 */
296struct batadv_orig_ifinfo *
297batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
298 struct batadv_hard_iface *if_outgoing)
299{
300 struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;
301
302 rcu_read_lock();
303 hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
304 list) {
305 if (tmp->if_outgoing != if_outgoing)
306 continue;
307
308 if (!atomic_inc_not_zero(&tmp->refcount))
309 continue;
310
311 orig_ifinfo = tmp;
312 break;
313 }
314 rcu_read_unlock();
315
316 return orig_ifinfo;
317}
318
319/**
320 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
321 * @orig_node: the orig node to be queried
322 * @if_outgoing: the interface for which the ifinfo should be acquired
323 *
324 * Returns NULL in case of failure or the orig_ifinfo object for the if_outgoing
325 * interface otherwise. The object is created and added to the list
326 * if it does not exist.
327 *
328 * The object is returned with refcounter increased by 1.
329 */
330struct batadv_orig_ifinfo *
331batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
332 struct batadv_hard_iface *if_outgoing)
333{
334 struct batadv_orig_ifinfo *orig_ifinfo = NULL;
335 unsigned long reset_time;
336
337 spin_lock_bh(&orig_node->neigh_list_lock);
338
339 orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
340 if (orig_ifinfo)
341 goto out;
342
343 orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
344 if (!orig_ifinfo)
345 goto out;
346
347 if (if_outgoing != BATADV_IF_DEFAULT &&
348 !atomic_inc_not_zero(&if_outgoing->refcount)) {
349 kfree(orig_ifinfo);
350 orig_ifinfo = NULL;
351 goto out;
352 }
353
354 reset_time = jiffies - 1;
355 reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
356 orig_ifinfo->batman_seqno_reset = reset_time;
357 orig_ifinfo->if_outgoing = if_outgoing;
358 INIT_HLIST_NODE(&orig_ifinfo->list);
359 atomic_set(&orig_ifinfo->refcount, 2);
360 hlist_add_head_rcu(&orig_ifinfo->list,
361 &orig_node->ifinfo_list);
362out:
363 spin_unlock_bh(&orig_node->neigh_list_lock);
364 return orig_ifinfo;
365}
366
367/**
Simon Wunderlich89652332013-11-13 19:14:46 +0100368 * batadv_neigh_ifinfo_get - find the ifinfo from an neigh_node
369 * @neigh_node: the neigh node to be queried
370 * @if_outgoing: the interface for which the ifinfo should be acquired
371 *
372 * The object is returned with refcounter increased by 1.
373 *
374 * Returns the requested neigh_ifinfo or NULL if not found
375 */
376struct batadv_neigh_ifinfo *
377batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
378 struct batadv_hard_iface *if_outgoing)
379{
380 struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
381 *tmp_neigh_ifinfo;
382
383 rcu_read_lock();
384 hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
385 list) {
386 if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
387 continue;
388
389 if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
390 continue;
391
392 neigh_ifinfo = tmp_neigh_ifinfo;
393 break;
394 }
395 rcu_read_unlock();
396
397 return neigh_ifinfo;
398}
399
400/**
401 * batadv_neigh_ifinfo_new - search and possibly create an neigh_ifinfo object
402 * @neigh_node: the neigh node to be queried
403 * @if_outgoing: the interface for which the ifinfo should be acquired
404 *
405 * Returns NULL in case of failure or the neigh_ifinfo object for the
406 * if_outgoing interface otherwise. The object is created and added to the list
407 * if it does not exist.
408 *
409 * The object is returned with refcounter increased by 1.
410 */
411struct batadv_neigh_ifinfo *
412batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
413 struct batadv_hard_iface *if_outgoing)
414{
415 struct batadv_neigh_ifinfo *neigh_ifinfo;
416
417 spin_lock_bh(&neigh->ifinfo_lock);
418
419 neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
420 if (neigh_ifinfo)
421 goto out;
422
423 neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
424 if (!neigh_ifinfo)
425 goto out;
426
427 if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
428 kfree(neigh_ifinfo);
429 neigh_ifinfo = NULL;
430 goto out;
431 }
432
433 INIT_HLIST_NODE(&neigh_ifinfo->list);
434 atomic_set(&neigh_ifinfo->refcount, 2);
435 neigh_ifinfo->if_outgoing = if_outgoing;
436
437 hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);
438
439out:
440 spin_unlock_bh(&neigh->ifinfo_lock);
441
442 return neigh_ifinfo;
443}
444
445/**
Antonio Quartulli0538f752013-09-02 12:15:01 +0200446 * batadv_neigh_node_new - create and init a new neigh_node object
447 * @hard_iface: the interface where the neighbour is connected to
448 * @neigh_addr: the mac address of the neighbour interface
449 * @orig_node: originator object representing the neighbour
450 *
451 * Allocates a new neigh_node object and initialises all the generic fields.
452 * Returns the new object or NULL on failure.
453 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200454struct batadv_neigh_node *
455batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200456 const u8 *neigh_addr, struct batadv_orig_node *orig_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000457{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200458 struct batadv_neigh_node *neigh_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000459
Sven Eckelmann704509b2011-05-14 23:14:54 +0200460 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000461 if (!neigh_node)
Marek Lindner7ae8b282012-03-01 15:35:21 +0800462 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000463
Marek Lindnerf729dc702015-07-26 04:37:15 +0800464 if (!atomic_inc_not_zero(&hard_iface->refcount)) {
465 kfree(neigh_node);
466 neigh_node = NULL;
467 goto out;
468 }
469
Marek Lindner9591a792010-12-12 21:57:11 +0000470 INIT_HLIST_NODE(&neigh_node->list);
Simon Wunderlich89652332013-11-13 19:14:46 +0100471 INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
472 spin_lock_init(&neigh_node->ifinfo_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000473
Antonio Quartulli8fdd0152014-01-22 00:42:11 +0100474 ether_addr_copy(neigh_node->addr, neigh_addr);
Antonio Quartulli0538f752013-09-02 12:15:01 +0200475 neigh_node->if_incoming = hard_iface;
476 neigh_node->orig_node = orig_node;
477
Marek Lindner1605d0d2011-02-18 12:28:11 +0000478 /* extra reference for return */
479 atomic_set(&neigh_node->refcount, 2);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000480
Marek Lindner7ae8b282012-03-01 15:35:21 +0800481out:
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000482 return neigh_node;
483}
484
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100485/**
Antonio Quartulli08bf0ed2014-01-29 11:25:12 +0100486 * batadv_neigh_node_get - retrieve a neighbour from the list
487 * @orig_node: originator which the neighbour belongs to
488 * @hard_iface: the interface where this neighbour is connected to
489 * @addr: the address of the neighbour
490 *
491 * Looks for and possibly returns a neighbour belonging to this originator list
492 * which is connected through the provided hard interface.
493 * Returns NULL if the neighbour is not found.
494 */
495struct batadv_neigh_node *
496batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
497 const struct batadv_hard_iface *hard_iface,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200498 const u8 *addr)
Antonio Quartulli08bf0ed2014-01-29 11:25:12 +0100499{
500 struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
501
502 rcu_read_lock();
503 hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
504 if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
505 continue;
506
507 if (tmp_neigh_node->if_incoming != hard_iface)
508 continue;
509
510 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
511 continue;
512
513 res = tmp_neigh_node;
514 break;
515 }
516 rcu_read_unlock();
517
518 return res;
519}
520
521/**
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100522 * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
523 * @rcu: rcu pointer of the orig_ifinfo object
524 */
525static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
526{
527 struct batadv_orig_ifinfo *orig_ifinfo;
Simon Wunderlich000c8df2014-03-26 15:46:22 +0100528 struct batadv_neigh_node *router;
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100529
530 orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
531
532 if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
533 batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
534
Simon Wunderlich000c8df2014-03-26 15:46:22 +0100535 /* this is the last reference to this object */
536 router = rcu_dereference_protected(orig_ifinfo->router, true);
537 if (router)
538 batadv_neigh_node_free_ref_now(router);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100539 kfree(orig_ifinfo);
540}
541
542/**
543 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
544 * the orig_ifinfo (without rcu callback)
545 * @orig_ifinfo: the orig_ifinfo object to release
546 */
547static void
548batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
549{
550 if (atomic_dec_and_test(&orig_ifinfo->refcount))
551 batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
552}
553
554/**
555 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
556 * the orig_ifinfo
557 * @orig_ifinfo: the orig_ifinfo object to release
558 */
559void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
560{
561 if (atomic_dec_and_test(&orig_ifinfo->refcount))
562 call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
563}
564
Sven Eckelmann03fc7f82012-05-12 18:34:00 +0200565static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000566{
Sasha Levinb67bfe02013-02-27 17:06:00 -0800567 struct hlist_node *node_tmp;
Simon Wunderlichf6c8b712013-11-13 19:14:45 +0100568 struct batadv_neigh_node *neigh_node;
Sven Eckelmann56303d32012-06-05 22:31:31 +0200569 struct batadv_orig_node *orig_node;
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100570 struct batadv_orig_ifinfo *orig_ifinfo;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000571
Sven Eckelmann56303d32012-06-05 22:31:31 +0200572 orig_node = container_of(rcu, struct batadv_orig_node, rcu);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000573
Marek Lindnerf987ed62010-12-12 21:57:12 +0000574 spin_lock_bh(&orig_node->neigh_list_lock);
575
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000576 /* for all neighbors towards this originator ... */
Sasha Levinb67bfe02013-02-27 17:06:00 -0800577 hlist_for_each_entry_safe(neigh_node, node_tmp,
Marek Lindner9591a792010-12-12 21:57:11 +0000578 &orig_node->neigh_list, list) {
Marek Lindnerf987ed62010-12-12 21:57:12 +0000579 hlist_del_rcu(&neigh_node->list);
Simon Wunderlich89652332013-11-13 19:14:46 +0100580 batadv_neigh_node_free_ref_now(neigh_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000581 }
582
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100583 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
584 &orig_node->ifinfo_list, list) {
585 hlist_del_rcu(&orig_ifinfo->list);
586 batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
587 }
Marek Lindnerf987ed62010-12-12 21:57:12 +0000588 spin_unlock_bh(&orig_node->neigh_list_lock);
589
Linus Lüssing60432d72014-02-15 17:47:51 +0100590 batadv_mcast_purge_orig(orig_node);
591
Martin Hundebølld56b1702013-01-25 11:12:39 +0100592 /* Free nc_nodes */
593 batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
594
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +0200595 batadv_frag_purge_orig(orig_node, NULL);
596
Antonio Quartullid0015fd2013-09-03 11:10:23 +0200597 if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
598 orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
599
Antonio Quartullia73105b2011-04-27 14:27:44 +0200600 kfree(orig_node->tt_buff);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000601 kfree(orig_node);
602}
603
Linus Lüssing72822222013-04-15 21:43:29 +0800604/**
605 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
606 * schedule an rcu callback for freeing it
607 * @orig_node: the orig node to free
608 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200609void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000610{
611 if (atomic_dec_and_test(&orig_node->refcount))
Sven Eckelmann03fc7f82012-05-12 18:34:00 +0200612 call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000613}
614
Linus Lüssing72822222013-04-15 21:43:29 +0800615/**
616 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
617 * possibly free it (without rcu callback)
618 * @orig_node: the orig node to free
619 */
620void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
621{
622 if (atomic_dec_and_test(&orig_node->refcount))
623 batadv_orig_node_free_rcu(&orig_node->rcu);
624}
625
Sven Eckelmann56303d32012-06-05 22:31:31 +0200626void batadv_originator_free(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000627{
Sven Eckelmann5bf74e92012-06-05 22:31:28 +0200628 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sasha Levinb67bfe02013-02-27 17:06:00 -0800629 struct hlist_node *node_tmp;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000630 struct hlist_head *head;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000631 spinlock_t *list_lock; /* spinlock to protect write access */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200632 struct batadv_orig_node *orig_node;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200633 u32 i;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000634
635 if (!hash)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000636 return;
637
638 cancel_delayed_work_sync(&bat_priv->orig_work);
639
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000640 bat_priv->orig_hash = NULL;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000641
642 for (i = 0; i < hash->size; i++) {
643 head = &hash->table[i];
644 list_lock = &hash->list_locks[i];
645
646 spin_lock_bh(list_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -0800647 hlist_for_each_entry_safe(orig_node, node_tmp,
Marek Lindner7aadf882011-02-18 12:28:09 +0000648 head, hash_entry) {
Sasha Levinb67bfe02013-02-27 17:06:00 -0800649 hlist_del_rcu(&orig_node->hash_entry);
Sven Eckelmann7d211ef2012-05-12 02:09:34 +0200650 batadv_orig_node_free_ref(orig_node);
Marek Lindner16b1aba2011-01-19 20:01:42 +0000651 }
652 spin_unlock_bh(list_lock);
653 }
654
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200655 batadv_hash_destroy(hash);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000656}
657
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200658/**
659 * batadv_orig_node_new - creates a new orig_node
660 * @bat_priv: the bat priv with all the soft interface information
661 * @addr: the mac address of the originator
662 *
663 * Creates a new originator object and initialise all the generic fields.
664 * The new object is not added to the originator list.
665 * Returns the newly created object or NULL on failure.
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +0200666 */
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200667struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200668 const u8 *addr)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000669{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200670 struct batadv_orig_node *orig_node;
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200671 struct batadv_orig_node_vlan *vlan;
Sven Eckelmann42d0b042012-06-03 22:19:17 +0200672 unsigned long reset_time;
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200673 int i;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000674
Sven Eckelmann39c75a52012-06-03 22:19:22 +0200675 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
676 "Creating new originator: %pM\n", addr);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000677
Sven Eckelmann704509b2011-05-14 23:14:54 +0200678 orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000679 if (!orig_node)
680 return NULL;
681
Marek Lindner9591a792010-12-12 21:57:11 +0000682 INIT_HLIST_HEAD(&orig_node->neigh_list);
Marek Lindnerd0fa4f32015-06-22 00:30:22 +0800683 INIT_HLIST_HEAD(&orig_node->vlan_list);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100684 INIT_HLIST_HEAD(&orig_node->ifinfo_list);
Marek Lindnerf3e00082011-01-25 21:52:11 +0000685 spin_lock_init(&orig_node->bcast_seqno_lock);
Marek Lindnerf987ed62010-12-12 21:57:12 +0000686 spin_lock_init(&orig_node->neigh_list_lock);
Antonio Quartullia73105b2011-04-27 14:27:44 +0200687 spin_lock_init(&orig_node->tt_buff_lock);
Antonio Quartullia70a9aa2013-07-30 22:16:24 +0200688 spin_lock_init(&orig_node->tt_lock);
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200689 spin_lock_init(&orig_node->vlan_list_lock);
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000690
Martin Hundebølld56b1702013-01-25 11:12:39 +0100691 batadv_nc_init_orig(orig_node);
692
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000693 /* extra reference for return */
694 atomic_set(&orig_node->refcount, 2);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000695
Marek Lindner16b1aba2011-01-19 20:01:42 +0000696 orig_node->bat_priv = bat_priv;
Antonio Quartulli8fdd0152014-01-22 00:42:11 +0100697 ether_addr_copy(orig_node->orig, addr);
Antonio Quartulli785ea112011-11-23 11:35:44 +0100698 batadv_dat_init_orig_node_addr(orig_node);
Antonio Quartullic8c991b2011-07-07 01:40:57 +0200699 atomic_set(&orig_node->last_ttvn, 0);
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200700 orig_node->tt_buff = NULL;
Antonio Quartullia73105b2011-04-27 14:27:44 +0200701 orig_node->tt_buff_len = 0;
Linus Lüssing2c667a32014-10-30 06:23:40 +0100702 orig_node->last_seen = jiffies;
Sven Eckelmann42d0b042012-06-03 22:19:17 +0200703 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
704 orig_node->bcast_seqno_reset = reset_time;
Linus Lüssing8a4023c2015-06-16 17:10:26 +0200705
Linus Lüssing60432d72014-02-15 17:47:51 +0100706#ifdef CONFIG_BATMAN_ADV_MCAST
707 orig_node->mcast_flags = BATADV_NO_FLAGS;
Linus Lüssing8a4023c2015-06-16 17:10:26 +0200708 INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
709 INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
710 INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
711 spin_lock_init(&orig_node->mcast_handler_lock);
Linus Lüssing60432d72014-02-15 17:47:51 +0100712#endif
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000713
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200714 /* create a vlan object for the "untagged" LAN */
715 vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
716 if (!vlan)
717 goto free_orig_node;
718 /* batadv_orig_node_vlan_new() increases the refcounter.
719 * Immediately release vlan since it is not needed anymore in this
720 * context
721 */
722 batadv_orig_node_vlan_free_ref(vlan);
723
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +0200724 for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
725 INIT_HLIST_HEAD(&orig_node->fragments[i].head);
726 spin_lock_init(&orig_node->fragments[i].lock);
727 orig_node->fragments[i].size = 0;
728 }
729
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000730 return orig_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000731free_orig_node:
732 kfree(orig_node);
733 return NULL;
734}
735
/**
 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
 * @bat_priv: the bat priv with all the soft interface information
 * @neigh: the neighbor node whose ifinfo list is to be checked
 *
 * Removes every per-outgoing-interface info object whose interface is
 * inactive, unused or about to be removed. The default (NULL) interface
 * entry is always kept.
 */
static void
batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
			  struct batadv_neigh_node *neigh)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;

	/* ifinfo_lock serializes writers on neigh->ifinfo_list */
	spin_lock_bh(&neigh->ifinfo_lock);

	/* for all ifinfo objects for this neighbor */
	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh->ifinfo_list, list) {
		if_outgoing = neigh_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
			   neigh->addr, if_outgoing->net_dev->name);

		/* unlink under the lock, then drop the list's reference;
		 * RCU readers may still hold their own references
		 */
		hlist_del_rcu(&neigh_ifinfo->list);
		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
	}

	spin_unlock_bh(&neigh->ifinfo_lock);
}
776
/**
 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Removes every per-outgoing-interface info object whose interface is
 * inactive, unused or about to be removed; the default (NULL) interface
 * entry is always kept.
 *
 * Returns true if any ifinfo entry was purged, false otherwise.
 */
static bool
batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;
	bool ifinfo_purged = false;

	/* neigh_list_lock also protects orig_node->ifinfo_list writers */
	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all ifinfo objects for this originator */
	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		if_outgoing = orig_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "router/ifinfo purge: originator %pM, iface: %s\n",
			   orig_node->orig, if_outgoing->net_dev->name);

		ifinfo_purged = true;

		/* drop the list's reference ... */
		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
		/* ... and, if this entry was the cached bonding candidate,
		 * the extra reference held by last_bonding_candidate as well
		 */
		if (orig_node->last_bonding_candidate == orig_ifinfo) {
			orig_node->last_bonding_candidate = NULL;
			batadv_orig_ifinfo_free_ref(orig_ifinfo);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	return ifinfo_purged;
}
828
/**
 * batadv_purge_orig_neighbors - purges neighbors from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Removes neighbors that either timed out (BATADV_PURGE_TIMEOUT) or whose
 * incoming interface is inactive, unused or about to be removed. Surviving
 * neighbors still get their per-interface info entries pruned.
 *
 * Returns true if any neighbor was purged, false otherwise
 */
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			/* re-check the interface status only to pick the
			 * matching debug message (iface purge vs. timeout)
			 */
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			/* unlink under the lock, drop the list's reference */
			hlist_del_rcu(&neigh_node->list);
			batadv_neigh_node_free_ref(neigh_node);
		} else {
			/* only necessary if not the whole neighbor is to be
			 * deleted, but some interface has been removed.
			 */
			batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}
886
/**
 * batadv_find_best_neighbor - finds the best neighbor after purging
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 * @if_outgoing: the interface for which the metric should be compared
 *
 * Walks the neighbor list under RCU and keeps the neighbor ranked best by
 * the routing algorithm's bat_neigh_cmp() for @if_outgoing.
 *
 * Returns the current best neighbor, with refcount increased.
 */
static struct batadv_neigh_node *
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
			  struct batadv_orig_node *orig_node,
			  struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_node *best = NULL, *neigh;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;

	rcu_read_lock();
	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
		if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
						best, if_outgoing) <= 0))
			continue;

		/* skip neighbors that are already being freed */
		if (!atomic_inc_not_zero(&neigh->refcount))
			continue;

		/* release the previous candidate's reference */
		if (best)
			batadv_neigh_node_free_ref(best);

		best = neigh;
	}
	rcu_read_unlock();

	return best;
}
921
/**
 * batadv_purge_orig_node - purges obsolete information from an orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * This function checks if the orig_node or substructures of it have become
 * obsolete, and purges this information if that's the case.
 *
 * Returns true if the orig_node is to be removed, false otherwise.
 */
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;
	struct batadv_hard_iface *hard_iface;
	bool changed_ifinfo, changed_neigh;

	/* originators are given twice the neighbor purge timeout */
	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	}
	changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
	changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);

	/* nothing was purged - the cached routes are still valid */
	if (!changed_ifinfo && !changed_neigh)
		return false;

	/* first for NULL ... */
	best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
						    BATADV_IF_DEFAULT);
	batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
			    best_neigh_node);
	if (best_neigh_node)
		batadv_neigh_node_free_ref(best_neigh_node);

	/* ... then for all other interfaces. */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		/* only consider interfaces belonging to this mesh */
		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		best_neigh_node = batadv_find_best_neighbor(bat_priv,
							    orig_node,
							    hard_iface);
		batadv_update_route(bat_priv, orig_node, hard_iface,
				    best_neigh_node);
		if (best_neigh_node)
			batadv_neigh_node_free_ref(best_neigh_node);
	}
	rcu_read_unlock();

	return false;
}
982
/**
 * _batadv_purge_orig - purge obsolete originators from the originator hash
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Walks every hash bucket, deletes originators that batadv_purge_orig_node()
 * declared dead (including their gateway and translation-table state) and
 * expires stale fragment buffers of the survivors. Finally re-runs the
 * gateway purge/election.
 */
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	/* hash may not have been allocated yet (early shutdown/init) */
	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				batadv_gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_tt_global_del_orig(orig_node->bat_priv,
							  orig_node, -1,
							  "originator timed out");
				/* drop the hash table's reference */
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			/* originator stays - only expire old fragments */
			batadv_frag_purge_orig(orig_node,
					       batadv_frag_check_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_node_purge(bat_priv);
	batadv_gw_election(bat_priv);
}
1022
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001023static void batadv_purge_orig(struct work_struct *work)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001024{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001025 struct delayed_work *delayed_work;
1026 struct batadv_priv *bat_priv;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001027
Sven Eckelmann56303d32012-06-05 22:31:31 +02001028 delayed_work = container_of(work, struct delayed_work, work);
1029 bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001030 _batadv_purge_orig(bat_priv);
Antonio Quartulli72414442012-12-25 13:14:37 +01001031 queue_delayed_work(batadv_event_workqueue,
1032 &bat_priv->orig_work,
1033 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001034}
1035
/**
 * batadv_purge_orig_ref - immediately run an originator purge pass
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Synchronous entry point for callers that cannot wait for the next
 * scheduled purge work iteration.
 */
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}
1040
/**
 * batadv_orig_seq_print_text - writes originator infos for the default
 *  interface to a debugfs seq_file
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Delegates the actual table output to the routing algorithm's
 * bat_orig_print() handler, if it provides one.
 *
 * Returns 0
 */
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	/* takes a reference on the primary interface (or prints an error) */
	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_free_ref(primary_if);

	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
					       BATADV_IF_DEFAULT);

	return 0;
}
1069
/**
 * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
 * outgoing interface
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Returns 0
 */
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_hard_iface *hard_iface;
	struct batadv_priv *bat_priv;

	/* takes a reference on the hard interface; released in "out" */
	hard_iface = batadv_hardif_get_by_netdev(net_dev);

	if (!hard_iface || !hard_iface->soft_iface) {
		seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
		goto out;
	}

	bat_priv = netdev_priv(hard_iface->soft_iface);
	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		goto out;
	}

	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
		seq_puts(seq, "Interface not active\n");
		goto out;
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
		   hard_iface->net_dev->dev_addr,
		   hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);

out:
	/* hard_iface may be NULL when the lookup above failed */
	if (hard_iface)
		batadv_hardif_free_ref(hard_iface);
	return 0;
}
1115
/**
 * batadv_orig_hash_add_if - resize per-originator data for a new interface
 * @hard_iface: the hard interface being added to the mesh
 * @max_if_num: the new total number of interfaces
 *
 * Asks the routing algorithm (via bat_orig_add_if, if implemented) to grow
 * each orig node's per-interface state.
 *
 * Returns 0 on success, -ENOMEM if any per-node resize failed.
 */
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			/* the resize hook is optional per algorithm */
			if (bao->bat_orig_add_if)
				ret = bao->bat_orig_add_if(orig_node,
							   max_if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	/* still inside the RCU read side from the failing bucket */
	rcu_read_unlock();
	return -ENOMEM;
}
1151
/**
 * batadv_orig_hash_del_if - resize per-originator data for a removed
 *  interface
 * @hard_iface: the hard interface being removed from the mesh
 * @max_if_num: the remaining total number of interfaces
 *
 * Asks the routing algorithm (via bat_orig_del_if, if implemented) to shrink
 * each orig node's per-interface state, then renumbers the remaining
 * interfaces of the same mesh so if_num values stay contiguous.
 *
 * Returns 0 on success, -ENOMEM if any per-node resize failed.
 */
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			/* the resize hook is optional per algorithm */
			if (bao->bat_orig_del_if)
				ret = bao->bat_orig_del_if(orig_node,
							   max_if_num,
							   hard_iface->if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		/* only renumber interfaces of the same mesh */
		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		/* close the gap left by the removed interface */
		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	/* still inside the RCU read side from the failing bucket */
	rcu_read_unlock();
	return -ENOMEM;
}