Sven Eckelmann9f6446c2015-04-23 13:16:35 +02001/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00002 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
Antonio Quartulliebf38fb2013-11-03 20:40:48 +010015 * along with this program; if not, see <http://www.gnu.org/licenses/>.
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000016 */
17
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000018#include "originator.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020019#include "main.h"
20
21#include <linux/errno.h>
22#include <linux/etherdevice.h>
23#include <linux/fs.h>
24#include <linux/jiffies.h>
25#include <linux/kernel.h>
26#include <linux/list.h>
27#include <linux/lockdep.h>
28#include <linux/netdevice.h>
Marek Lindnerd0fa4f32015-06-22 00:30:22 +080029#include <linux/rculist.h>
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020030#include <linux/seq_file.h>
31#include <linux/slab.h>
32#include <linux/spinlock.h>
33#include <linux/workqueue.h>
34
35#include "distributed-arp-table.h"
36#include "fragmentation.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000037#include "gateway_client.h"
38#include "hard-interface.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020039#include "hash.h"
Linus Lüssing60432d72014-02-15 17:47:51 +010040#include "multicast.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020041#include "network-coding.h"
42#include "routing.h"
43#include "translation-table.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000044
Antonio Quartullidec05072012-11-10 11:00:32 +010045/* hash class keys */
46static struct lock_class_key batadv_orig_hash_lock_class_key;
47
Sven Eckelmann03fc7f82012-05-12 18:34:00 +020048static void batadv_purge_orig(struct work_struct *work);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000049
Sven Eckelmann62fe7102015-09-15 19:00:48 +020050/**
Sven Eckelmann7afcbbe2015-10-31 12:29:29 +010051 * batadv_compare_orig - comparison function used in the originator hash table
52 * @node: node in the local table
53 * @data2: second object to compare the node to
Sven Eckelmann62fe7102015-09-15 19:00:48 +020054 *
55 * Return: 1 if they are the same originator
56 */
Antonio Quartullibbad0a52013-09-02 12:15:02 +020057int batadv_compare_orig(const struct hlist_node *node, const void *data2)
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020058{
Sven Eckelmann56303d32012-06-05 22:31:31 +020059 const void *data1 = container_of(node, struct batadv_orig_node,
60 hash_entry);
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020061
dingtianhong323813e2013-12-26 19:40:39 +080062 return batadv_compare_eth(data1, data2);
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020063}
64
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020065/**
66 * batadv_orig_node_vlan_get - get an orig_node_vlan object
67 * @orig_node: the originator serving the VLAN
68 * @vid: the VLAN identifier
69 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +020070 * Return: the vlan object identified by vid and belonging to orig_node or NULL
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020071 * if it does not exist.
72 */
73struct batadv_orig_node_vlan *
74batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
75 unsigned short vid)
76{
77 struct batadv_orig_node_vlan *vlan = NULL, *tmp;
78
79 rcu_read_lock();
Marek Lindnerd0fa4f32015-06-22 00:30:22 +080080 hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020081 if (tmp->vid != vid)
82 continue;
83
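 /* take a reference only if the entry is not already being freed,
  * i.e. its refcount has not dropped to zero yet
  */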
84 if (!atomic_inc_not_zero(&tmp->refcount))
85 continue;
86
87 vlan = tmp;
88
89 break;
90 }
91 rcu_read_unlock();
92
93 return vlan;
94}
95
96/**
97 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
98 * object
99 * @orig_node: the originator serving the VLAN
100 * @vid: the VLAN identifier
101 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200102 * Return: NULL in case of failure or the vlan object identified by vid and
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200103 * belonging to orig_node otherwise. The object is created and added to the list
104 * if it does not exist.
105 *
106 * The object is returned with refcounter increased by 1.
107 */
108struct batadv_orig_node_vlan *
109batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
110 unsigned short vid)
111{
112 struct batadv_orig_node_vlan *vlan;
113
114 spin_lock_bh(&orig_node->vlan_list_lock);
115
116 /* first look if an object for this vid already exists */
117 vlan = batadv_orig_node_vlan_get(orig_node, vid);
118 if (vlan)
119 goto out;
120
121 vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
122 if (!vlan)
123 goto out;
124
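 /* one reference for the vlan_list entry, one for the caller */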
125 atomic_set(&vlan->refcount, 2);
126 vlan->vid = vid;
127
Marek Lindnerd0fa4f32015-06-22 00:30:22 +0800128 hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200129
130out:
131 spin_unlock_bh(&orig_node->vlan_list_lock);
132
133 return vlan;
134}
135
136/**
137 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
138 * the originator-vlan object
139 * @orig_vlan: the originator-vlan object to release
140 */
141void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
142{
143 if (atomic_dec_and_test(&orig_vlan->refcount))
144 kfree_rcu(orig_vlan, rcu);
145}
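/* Illustrative caller pattern for the per-originator vlan objects above
 * (a sketch, not code taken from this file): look up or create the entry,
 * use it, then drop the reference again:
 *
 *	vlan = batadv_orig_node_vlan_new(orig_node, vid);
 *	if (!vlan)
 *		return -ENOMEM;
 *	...
 *	batadv_orig_node_vlan_free_ref(vlan);
 */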
146
Sven Eckelmann56303d32012-06-05 22:31:31 +0200147int batadv_originator_init(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000148{
149 if (bat_priv->orig_hash)
Sven Eckelmann5346c352012-05-05 13:27:28 +0200150 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000151
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200152 bat_priv->orig_hash = batadv_hash_new(1024);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000153
154 if (!bat_priv->orig_hash)
155 goto err;
156
Antonio Quartullidec05072012-11-10 11:00:32 +0100157 batadv_hash_set_lock_class(bat_priv->orig_hash,
158 &batadv_orig_hash_lock_class_key);
159
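 /* schedule the periodic purge worker which removes timed out
  * originators and neighbors (see batadv_purge_orig() below)
  */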
Antonio Quartulli72414442012-12-25 13:14:37 +0100160 INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
161 queue_delayed_work(batadv_event_workqueue,
162 &bat_priv->orig_work,
163 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
164
Sven Eckelmann5346c352012-05-05 13:27:28 +0200165 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000166
167err:
Sven Eckelmann5346c352012-05-05 13:27:28 +0200168 return -ENOMEM;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000169}
170
Simon Wunderlich89652332013-11-13 19:14:46 +0100171/**
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100172 * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
173 * free after rcu grace period
Simon Wunderlich89652332013-11-13 19:14:46 +0100174 * @neigh_ifinfo: the neigh_ifinfo object to release
175 */
176static void
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100177batadv_neigh_ifinfo_release(struct batadv_neigh_ifinfo *neigh_ifinfo)
Simon Wunderlich89652332013-11-13 19:14:46 +0100178{
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100179 if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
180 batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);
181
182 kfree_rcu(neigh_ifinfo, rcu);
Simon Wunderlich89652332013-11-13 19:14:46 +0100183}
184
185/**
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100186 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
Simon Wunderlich89652332013-11-13 19:14:46 +0100187 * the neigh_ifinfo
188 * @neigh_ifinfo: the neigh_ifinfo object to release
189 */
190void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
191{
192 if (atomic_dec_and_test(&neigh_ifinfo->refcount))
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100193 batadv_neigh_ifinfo_release(neigh_ifinfo);
Simon Wunderlich89652332013-11-13 19:14:46 +0100194}
195
196/**
Sven Eckelmannf6389692016-01-05 12:06:23 +0100197 * batadv_hardif_neigh_release - release hardif neigh node from lists and
198 * queue for free after rcu grace period
Marek Lindnercef63412015-08-04 21:09:55 +0800199 * @hardif_neigh: hardif neighbor to free
200 */
201static void
Sven Eckelmannf6389692016-01-05 12:06:23 +0100202batadv_hardif_neigh_release(struct batadv_hardif_neigh_node *hardif_neigh)
Marek Lindnercef63412015-08-04 21:09:55 +0800203{
Sven Eckelmannf6389692016-01-05 12:06:23 +0100204 spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
205 hlist_del_init_rcu(&hardif_neigh->list);
206 spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
Sven Eckelmannbab7c6c2016-01-05 12:06:17 +0100207
Sven Eckelmannf6389692016-01-05 12:06:23 +0100208 batadv_hardif_free_ref(hardif_neigh->if_incoming);
209 kfree_rcu(hardif_neigh, rcu);
Marek Lindnercef63412015-08-04 21:09:55 +0800210}
211
212/**
213 * batadv_hardif_neigh_free_ref - decrement the hardif neighbors refcounter
Sven Eckelmannf6389692016-01-05 12:06:23 +0100214 * and possibly release it
Marek Lindnercef63412015-08-04 21:09:55 +0800215 * @hardif_neigh: hardif neighbor to free
216 */
217void batadv_hardif_neigh_free_ref(struct batadv_hardif_neigh_node *hardif_neigh)
218{
Sven Eckelmannf6389692016-01-05 12:06:23 +0100219 if (atomic_dec_and_test(&hardif_neigh->refcount))
220 batadv_hardif_neigh_release(hardif_neigh);
Marek Lindnercef63412015-08-04 21:09:55 +0800221}
222
223/**
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100224 * batadv_neigh_node_release - release neigh_node from lists and queue for
225 * free after rcu grace period
226 * @neigh_node: neigh neighbor to free
Simon Wunderlich89652332013-11-13 19:14:46 +0100227 */
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100228static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node)
Simon Wunderlich89652332013-11-13 19:14:46 +0100229{
230 struct hlist_node *node_tmp;
Marek Lindnercef63412015-08-04 21:09:55 +0800231 struct batadv_hardif_neigh_node *hardif_neigh;
Simon Wunderlich89652332013-11-13 19:14:46 +0100232 struct batadv_neigh_ifinfo *neigh_ifinfo;
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800233 struct batadv_algo_ops *bao;
Simon Wunderlich89652332013-11-13 19:14:46 +0100234
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800235 bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
Simon Wunderlich89652332013-11-13 19:14:46 +0100236
237 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
238 &neigh_node->ifinfo_list, list) {
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100239 batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
Simon Wunderlich89652332013-11-13 19:14:46 +0100240 }
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800241
Marek Lindnercef63412015-08-04 21:09:55 +0800242 hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
243 neigh_node->addr);
244 if (hardif_neigh) {
245 /* batadv_hardif_neigh_get() increases refcount too */
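 /* the first put below drops that lookup reference, the second one
  * drops the reference taken in batadv_neigh_node_new()
  */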
Sven Eckelmannf6389692016-01-05 12:06:23 +0100246 batadv_hardif_neigh_free_ref(hardif_neigh);
247 batadv_hardif_neigh_free_ref(hardif_neigh);
Marek Lindnercef63412015-08-04 21:09:55 +0800248 }
249
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800250 if (bao->bat_neigh_free)
251 bao->bat_neigh_free(neigh_node);
252
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100253 batadv_hardif_free_ref(neigh_node->if_incoming);
Simon Wunderlich89652332013-11-13 19:14:46 +0100254
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100255 kfree_rcu(neigh_node, rcu);
Simon Wunderlich89652332013-11-13 19:14:46 +0100256}
257
258/**
Simon Wunderlich89652332013-11-13 19:14:46 +0100259 * batadv_neigh_node_free_ref - decrement the neighbors refcounter
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100260 * and possibly release it
Simon Wunderlich89652332013-11-13 19:14:46 +0100261 * @neigh_node: neigh neighbor to free
262 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200263void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
Simon Wunderlicha4c135c2011-01-19 20:01:43 +0000264{
Marek Lindner44524fc2011-02-10 14:33:53 +0000265 if (atomic_dec_and_test(&neigh_node->refcount))
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100266 batadv_neigh_node_release(neigh_node);
Simon Wunderlicha4c135c2011-01-19 20:01:43 +0000267}
268
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100269/**
270 * batadv_orig_node_get_router - router to the originator depending on iface
271 * @orig_node: the orig node for the router
272 * @if_outgoing: the interface where the payload packet has been received or
273 * the OGM should be sent to
274 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200275 * Return: the neighbor which should be router for this orig_node/iface.
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100276 *
277 * The object is returned with refcounter increased by 1.
278 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200279struct batadv_neigh_node *
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100280batadv_orig_router_get(struct batadv_orig_node *orig_node,
281 const struct batadv_hard_iface *if_outgoing)
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000282{
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100283 struct batadv_orig_ifinfo *orig_ifinfo;
284 struct batadv_neigh_node *router = NULL;
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000285
286 rcu_read_lock();
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100287 hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
288 if (orig_ifinfo->if_outgoing != if_outgoing)
289 continue;
290
291 router = rcu_dereference(orig_ifinfo->router);
292 break;
293 }
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000294
295 if (router && !atomic_inc_not_zero(&router->refcount))
296 router = NULL;
297
298 rcu_read_unlock();
299 return router;
300}
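/* Illustrative use of the router lookup above (a sketch, not code taken
 * from this file): fetch the current best next hop for the default
 * interface and put the reference once done with it:
 *
 *	router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
 *	if (router) {
 *		... send the packet towards router->addr ...
 *		batadv_neigh_node_free_ref(router);
 *	}
 */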
301
Antonio Quartulli0538f752013-09-02 12:15:01 +0200302/**
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100303 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
304 * @orig_node: the orig node to be queried
305 * @if_outgoing: the interface for which the ifinfo should be acquired
306 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200307 * Return: the requested orig_ifinfo or NULL if not found.
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100308 *
309 * The object is returned with refcounter increased by 1.
310 */
311struct batadv_orig_ifinfo *
312batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
313 struct batadv_hard_iface *if_outgoing)
314{
315 struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;
316
317 rcu_read_lock();
318 hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
319 list) {
320 if (tmp->if_outgoing != if_outgoing)
321 continue;
322
323 if (!atomic_inc_not_zero(&tmp->refcount))
324 continue;
325
326 orig_ifinfo = tmp;
327 break;
328 }
329 rcu_read_unlock();
330
331 return orig_ifinfo;
332}
333
334/**
335 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
336 * @orig_node: the orig node to be queried
337 * @if_outgoing: the interface for which the ifinfo should be acquired
338 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200339 * Return: NULL in case of failure or the orig_ifinfo object for the if_outgoing
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100340 * interface otherwise. The object is created and added to the list
341 * if it does not exist.
342 *
343 * The object is returned with refcounter increased by 1.
344 */
345struct batadv_orig_ifinfo *
346batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
347 struct batadv_hard_iface *if_outgoing)
348{
349 struct batadv_orig_ifinfo *orig_ifinfo = NULL;
350 unsigned long reset_time;
351
352 spin_lock_bh(&orig_node->neigh_list_lock);
353
354 orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
355 if (orig_ifinfo)
356 goto out;
357
358 orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
359 if (!orig_ifinfo)
360 goto out;
361
362 if (if_outgoing != BATADV_IF_DEFAULT &&
363 !atomic_inc_not_zero(&if_outgoing->refcount)) {
364 kfree(orig_ifinfo);
365 orig_ifinfo = NULL;
366 goto out;
367 }
368
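 /* pre-date the last sequence number reset so that the
  * BATADV_RESET_PROTECTION_MS window is already over for a new entry
  */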
369 reset_time = jiffies - 1;
370 reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
371 orig_ifinfo->batman_seqno_reset = reset_time;
372 orig_ifinfo->if_outgoing = if_outgoing;
373 INIT_HLIST_NODE(&orig_ifinfo->list);
374 atomic_set(&orig_ifinfo->refcount, 2);
375 hlist_add_head_rcu(&orig_ifinfo->list,
376 &orig_node->ifinfo_list);
377out:
378 spin_unlock_bh(&orig_node->neigh_list_lock);
379 return orig_ifinfo;
380}
381
382/**
Simon Wunderlich89652332013-11-13 19:14:46 +0100383 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
Sven Eckelmanne51f0392015-09-06 21:38:51 +0200384 * @neigh: the neigh node to be queried
Simon Wunderlich89652332013-11-13 19:14:46 +0100385 * @if_outgoing: the interface for which the ifinfo should be acquired
386 *
387 * The object is returned with refcounter increased by 1.
388 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200389 * Return: the requested neigh_ifinfo or NULL if not found
Simon Wunderlich89652332013-11-13 19:14:46 +0100390 */
391struct batadv_neigh_ifinfo *
392batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
393 struct batadv_hard_iface *if_outgoing)
394{
395 struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
396 *tmp_neigh_ifinfo;
397
398 rcu_read_lock();
399 hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
400 list) {
401 if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
402 continue;
403
404 if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
405 continue;
406
407 neigh_ifinfo = tmp_neigh_ifinfo;
408 break;
409 }
410 rcu_read_unlock();
411
412 return neigh_ifinfo;
413}
414
415/**
 416 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
Sven Eckelmanne51f0392015-09-06 21:38:51 +0200417 * @neigh: the neigh node to be queried
Simon Wunderlich89652332013-11-13 19:14:46 +0100418 * @if_outgoing: the interface for which the ifinfo should be acquired
419 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200420 * Return: NULL in case of failure or the neigh_ifinfo object for the
Simon Wunderlich89652332013-11-13 19:14:46 +0100421 * if_outgoing interface otherwise. The object is created and added to the list
422 * if it does not exist.
423 *
424 * The object is returned with refcounter increased by 1.
425 */
426struct batadv_neigh_ifinfo *
427batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
428 struct batadv_hard_iface *if_outgoing)
429{
430 struct batadv_neigh_ifinfo *neigh_ifinfo;
431
432 spin_lock_bh(&neigh->ifinfo_lock);
433
434 neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
435 if (neigh_ifinfo)
436 goto out;
437
438 neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
439 if (!neigh_ifinfo)
440 goto out;
441
442 if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
443 kfree(neigh_ifinfo);
444 neigh_ifinfo = NULL;
445 goto out;
446 }
447
448 INIT_HLIST_NODE(&neigh_ifinfo->list);
449 atomic_set(&neigh_ifinfo->refcount, 2);
450 neigh_ifinfo->if_outgoing = if_outgoing;
451
452 hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);
453
454out:
455 spin_unlock_bh(&neigh->ifinfo_lock);
456
457 return neigh_ifinfo;
458}
459
460/**
Marek Lindnered292662015-08-04 23:31:44 +0800461 * batadv_neigh_node_get - retrieve a neighbour from the list
462 * @orig_node: originator which the neighbour belongs to
463 * @hard_iface: the interface where this neighbour is connected to
464 * @addr: the address of the neighbour
465 *
 466 * Looks for and possibly returns a neighbour in this originator's neighbour list
467 * which is connected through the provided hard interface.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200468 *
 469 * Return: neighbor when found. Otherwise NULL
Marek Lindnered292662015-08-04 23:31:44 +0800470 */
471static struct batadv_neigh_node *
472batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
473 const struct batadv_hard_iface *hard_iface,
474 const u8 *addr)
475{
476 struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
477
478 rcu_read_lock();
479 hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
480 if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
481 continue;
482
483 if (tmp_neigh_node->if_incoming != hard_iface)
484 continue;
485
486 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
487 continue;
488
489 res = tmp_neigh_node;
490 break;
491 }
492 rcu_read_unlock();
493
494 return res;
495}
496
497/**
Marek Lindnercef63412015-08-04 21:09:55 +0800498 * batadv_hardif_neigh_create - create a hardif neighbour node
499 * @hard_iface: the interface this neighbour is connected to
500 * @neigh_addr: the interface address of the neighbour to retrieve
501 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200502 * Return: the hardif neighbour node if found or created or NULL otherwise.
Marek Lindnercef63412015-08-04 21:09:55 +0800503 */
504static struct batadv_hardif_neigh_node *
505batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
506 const u8 *neigh_addr)
507{
Marek Lindner8248a4c2015-08-04 21:09:56 +0800508 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
Marek Lindnercef63412015-08-04 21:09:55 +0800509 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
510
511 spin_lock_bh(&hard_iface->neigh_list_lock);
512
513 /* check if neighbor hasn't been added in the meantime */
514 hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
515 if (hardif_neigh)
516 goto out;
517
518 if (!atomic_inc_not_zero(&hard_iface->refcount))
519 goto out;
520
521 hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
522 if (!hardif_neigh) {
523 batadv_hardif_free_ref(hard_iface);
524 goto out;
525 }
526
527 INIT_HLIST_NODE(&hardif_neigh->list);
528 ether_addr_copy(hardif_neigh->addr, neigh_addr);
529 hardif_neigh->if_incoming = hard_iface;
530 hardif_neigh->last_seen = jiffies;
531
532 atomic_set(&hardif_neigh->refcount, 1);
533
Marek Lindner8248a4c2015-08-04 21:09:56 +0800534 if (bat_priv->bat_algo_ops->bat_hardif_neigh_init)
535 bat_priv->bat_algo_ops->bat_hardif_neigh_init(hardif_neigh);
536
Marek Lindnercef63412015-08-04 21:09:55 +0800537 hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
538
539out:
540 spin_unlock_bh(&hard_iface->neigh_list_lock);
541 return hardif_neigh;
542}
543
544/**
545 * batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
546 * node
547 * @hard_iface: the interface this neighbour is connected to
548 * @neigh_addr: the interface address of the neighbour to retrieve
549 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200550 * Return: the hardif neighbour node if found or created or NULL otherwise.
Marek Lindnercef63412015-08-04 21:09:55 +0800551 */
552static struct batadv_hardif_neigh_node *
553batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
554 const u8 *neigh_addr)
555{
556 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
557
558 /* first check without locking to avoid the overhead */
559 hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
560 if (hardif_neigh)
561 return hardif_neigh;
562
563 return batadv_hardif_neigh_create(hard_iface, neigh_addr);
564}
565
566/**
567 * batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
568 * @hard_iface: the interface where this neighbour is connected to
569 * @neigh_addr: the address of the neighbour
570 *
571 * Looks for and possibly returns a neighbour belonging to this hard interface.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200572 *
 573 * Return: neighbor when found. Otherwise NULL
Marek Lindnercef63412015-08-04 21:09:55 +0800574 */
575struct batadv_hardif_neigh_node *
576batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
577 const u8 *neigh_addr)
578{
579 struct batadv_hardif_neigh_node *tmp_hardif_neigh, *hardif_neigh = NULL;
580
581 rcu_read_lock();
582 hlist_for_each_entry_rcu(tmp_hardif_neigh,
583 &hard_iface->neigh_list, list) {
584 if (!batadv_compare_eth(tmp_hardif_neigh->addr, neigh_addr))
585 continue;
586
587 if (!atomic_inc_not_zero(&tmp_hardif_neigh->refcount))
588 continue;
589
590 hardif_neigh = tmp_hardif_neigh;
591 break;
592 }
593 rcu_read_unlock();
594
595 return hardif_neigh;
596}
597
598/**
Antonio Quartulli0538f752013-09-02 12:15:01 +0200599 * batadv_neigh_node_new - create and init a new neigh_node object
Marek Lindner3f32f8a2015-07-26 04:59:15 +0800600 * @orig_node: originator object representing the neighbour
Antonio Quartulli0538f752013-09-02 12:15:01 +0200601 * @hard_iface: the interface where the neighbour is connected to
602 * @neigh_addr: the mac address of the neighbour interface
Antonio Quartulli0538f752013-09-02 12:15:01 +0200603 *
604 * Allocates a new neigh_node object and initialises all the generic fields.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200605 *
 606 * Return: the neighbour node if found or created. Otherwise NULL
Antonio Quartulli0538f752013-09-02 12:15:01 +0200607 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200608struct batadv_neigh_node *
Marek Lindner3f32f8a2015-07-26 04:59:15 +0800609batadv_neigh_node_new(struct batadv_orig_node *orig_node,
610 struct batadv_hard_iface *hard_iface,
611 const u8 *neigh_addr)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000612{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200613 struct batadv_neigh_node *neigh_node;
Marek Lindnercef63412015-08-04 21:09:55 +0800614 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000615
Marek Lindner741aa062015-07-26 04:57:43 +0800616 neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
617 if (neigh_node)
618 goto out;
619
Marek Lindnercef63412015-08-04 21:09:55 +0800620 hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface,
621 neigh_addr);
622 if (!hardif_neigh)
623 goto out;
624
Sven Eckelmann704509b2011-05-14 23:14:54 +0200625 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000626 if (!neigh_node)
Marek Lindner7ae8b282012-03-01 15:35:21 +0800627 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000628
Marek Lindnerf729dc702015-07-26 04:37:15 +0800629 if (!atomic_inc_not_zero(&hard_iface->refcount)) {
630 kfree(neigh_node);
631 neigh_node = NULL;
632 goto out;
633 }
634
Marek Lindner9591a792010-12-12 21:57:11 +0000635 INIT_HLIST_NODE(&neigh_node->list);
Simon Wunderlich89652332013-11-13 19:14:46 +0100636 INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
637 spin_lock_init(&neigh_node->ifinfo_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000638
Antonio Quartulli8fdd0152014-01-22 00:42:11 +0100639 ether_addr_copy(neigh_node->addr, neigh_addr);
Antonio Quartulli0538f752013-09-02 12:15:01 +0200640 neigh_node->if_incoming = hard_iface;
641 neigh_node->orig_node = orig_node;
642
Marek Lindner1605d0d2011-02-18 12:28:11 +0000643 /* extra reference for return */
644 atomic_set(&neigh_node->refcount, 2);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000645
Marek Lindner741aa062015-07-26 04:57:43 +0800646 spin_lock_bh(&orig_node->neigh_list_lock);
647 hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
648 spin_unlock_bh(&orig_node->neigh_list_lock);
649
Marek Lindnercef63412015-08-04 21:09:55 +0800650 /* increment unique neighbor refcount */
651 atomic_inc(&hardif_neigh->refcount);
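 /* this extra reference is dropped again in batadv_neigh_node_release()
  * when the neigh_node is destroyed
  */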
652
Marek Lindner741aa062015-07-26 04:57:43 +0800653 batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
654 "Creating new neighbor %pM for orig_node %pM on interface %s\n",
655 neigh_addr, orig_node->orig, hard_iface->net_dev->name);
656
Marek Lindner7ae8b282012-03-01 15:35:21 +0800657out:
Marek Lindnercef63412015-08-04 21:09:55 +0800658 if (hardif_neigh)
659 batadv_hardif_neigh_free_ref(hardif_neigh);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000660 return neigh_node;
661}
662
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100663/**
Marek Lindner75874052015-08-04 21:09:57 +0800664 * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
665 * @seq: neighbour table seq_file struct
666 * @offset: not used
667 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200668 * Return: always 0
Marek Lindner75874052015-08-04 21:09:57 +0800669 */
670int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
671{
672 struct net_device *net_dev = (struct net_device *)seq->private;
673 struct batadv_priv *bat_priv = netdev_priv(net_dev);
674 struct batadv_hard_iface *primary_if;
675
676 primary_if = batadv_seq_print_text_primary_if_get(seq);
677 if (!primary_if)
678 return 0;
679
680 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
681 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
682 primary_if->net_dev->dev_addr, net_dev->name,
683 bat_priv->bat_algo_ops->name);
684
685 batadv_hardif_free_ref(primary_if);
686
687 if (!bat_priv->bat_algo_ops->bat_neigh_print) {
688 seq_puts(seq,
689 "No printing function for this routing protocol\n");
690 return 0;
691 }
692
693 bat_priv->bat_algo_ops->bat_neigh_print(bat_priv, seq);
694 return 0;
695}
696
697/**
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100698 * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
699 * free after rcu grace period
700 * @orig_ifinfo: the orig_ifinfo object to release
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100701 */
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100702static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100703{
Simon Wunderlich000c8df2014-03-26 15:46:22 +0100704 struct batadv_neigh_node *router;
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100705
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100706 if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100707 batadv_hardif_free_ref(orig_ifinfo->if_outgoing);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100708
Simon Wunderlich000c8df2014-03-26 15:46:22 +0100709 /* this is the last reference to this object */
710 router = rcu_dereference_protected(orig_ifinfo->router, true);
711 if (router)
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100712 batadv_neigh_node_free_ref(router);
713
714 kfree_rcu(orig_ifinfo, rcu);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100715}
716
717/**
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100718 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100719 * the orig_ifinfo
720 * @orig_ifinfo: the orig_ifinfo object to release
721 */
722void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
723{
724 if (atomic_dec_and_test(&orig_ifinfo->refcount))
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100725 batadv_orig_ifinfo_release(orig_ifinfo);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100726}
727
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100728/**
729 * batadv_orig_node_free_rcu - free the orig_node
730 * @rcu: rcu pointer of the orig_node
731 */
Sven Eckelmann03fc7f82012-05-12 18:34:00 +0200732static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000733{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200734 struct batadv_orig_node *orig_node;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000735
Sven Eckelmann56303d32012-06-05 22:31:31 +0200736 orig_node = container_of(rcu, struct batadv_orig_node, rcu);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000737
Linus Lüssing60432d72014-02-15 17:47:51 +0100738 batadv_mcast_purge_orig(orig_node);
739
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +0200740 batadv_frag_purge_orig(orig_node, NULL);
741
Antonio Quartullid0015fd2013-09-03 11:10:23 +0200742 if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
743 orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
744
Antonio Quartullia73105b2011-04-27 14:27:44 +0200745 kfree(orig_node->tt_buff);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000746 kfree(orig_node);
747}
748
Linus Lüssing72822222013-04-15 21:43:29 +0800749/**
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100750 * batadv_orig_node_release - release orig_node from lists and queue for
751 * free after rcu grace period
752 * @orig_node: the orig node to free
753 */
754static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
755{
756 struct hlist_node *node_tmp;
757 struct batadv_neigh_node *neigh_node;
758 struct batadv_orig_ifinfo *orig_ifinfo;
759
760 spin_lock_bh(&orig_node->neigh_list_lock);
761
762 /* for all neighbors towards this originator ... */
763 hlist_for_each_entry_safe(neigh_node, node_tmp,
764 &orig_node->neigh_list, list) {
765 hlist_del_rcu(&neigh_node->list);
766 batadv_neigh_node_free_ref(neigh_node);
767 }
768
769 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
770 &orig_node->ifinfo_list, list) {
771 hlist_del_rcu(&orig_ifinfo->list);
772 batadv_orig_ifinfo_free_ref(orig_ifinfo);
773 }
774 spin_unlock_bh(&orig_node->neigh_list_lock);
775
776 /* Free nc_nodes */
777 batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
778
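 /* defer the actual freeing until all RCU readers that may still hold a
  * pointer to this orig_node have finished
  */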
779 call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
780}
781
782/**
Linus Lüssing72822222013-04-15 21:43:29 +0800783 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100784 * release it
Linus Lüssing72822222013-04-15 21:43:29 +0800785 * @orig_node: the orig node to free
786 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200787void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000788{
789 if (atomic_dec_and_test(&orig_node->refcount))
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100790 batadv_orig_node_release(orig_node);
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000791}
792
Sven Eckelmann56303d32012-06-05 22:31:31 +0200793void batadv_originator_free(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000794{
Sven Eckelmann5bf74e92012-06-05 22:31:28 +0200795 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sasha Levinb67bfe02013-02-27 17:06:00 -0800796 struct hlist_node *node_tmp;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000797 struct hlist_head *head;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000798 spinlock_t *list_lock; /* spinlock to protect write access */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200799 struct batadv_orig_node *orig_node;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200800 u32 i;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000801
802 if (!hash)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000803 return;
804
805 cancel_delayed_work_sync(&bat_priv->orig_work);
806
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000807 bat_priv->orig_hash = NULL;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000808
809 for (i = 0; i < hash->size; i++) {
810 head = &hash->table[i];
811 list_lock = &hash->list_locks[i];
812
813 spin_lock_bh(list_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -0800814 hlist_for_each_entry_safe(orig_node, node_tmp,
Marek Lindner7aadf882011-02-18 12:28:09 +0000815 head, hash_entry) {
Sasha Levinb67bfe02013-02-27 17:06:00 -0800816 hlist_del_rcu(&orig_node->hash_entry);
Sven Eckelmann7d211ef2012-05-12 02:09:34 +0200817 batadv_orig_node_free_ref(orig_node);
Marek Lindner16b1aba2011-01-19 20:01:42 +0000818 }
819 spin_unlock_bh(list_lock);
820 }
821
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200822 batadv_hash_destroy(hash);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000823}
824
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200825/**
826 * batadv_orig_node_new - creates a new orig_node
827 * @bat_priv: the bat priv with all the soft interface information
828 * @addr: the mac address of the originator
829 *
 830 * Creates a new originator object and initialises all the generic fields.
831 * The new object is not added to the originator list.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200832 *
833 * Return: the newly created object or NULL on failure.
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +0200834 */
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200835struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200836 const u8 *addr)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000837{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200838 struct batadv_orig_node *orig_node;
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200839 struct batadv_orig_node_vlan *vlan;
Sven Eckelmann42d0b042012-06-03 22:19:17 +0200840 unsigned long reset_time;
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200841 int i;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000842
Sven Eckelmann39c75a52012-06-03 22:19:22 +0200843 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
844 "Creating new originator: %pM\n", addr);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000845
Sven Eckelmann704509b2011-05-14 23:14:54 +0200846 orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000847 if (!orig_node)
848 return NULL;
849
Marek Lindner9591a792010-12-12 21:57:11 +0000850 INIT_HLIST_HEAD(&orig_node->neigh_list);
Marek Lindnerd0fa4f32015-06-22 00:30:22 +0800851 INIT_HLIST_HEAD(&orig_node->vlan_list);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100852 INIT_HLIST_HEAD(&orig_node->ifinfo_list);
Marek Lindnerf3e00082011-01-25 21:52:11 +0000853 spin_lock_init(&orig_node->bcast_seqno_lock);
Marek Lindnerf987ed62010-12-12 21:57:12 +0000854 spin_lock_init(&orig_node->neigh_list_lock);
Antonio Quartullia73105b2011-04-27 14:27:44 +0200855 spin_lock_init(&orig_node->tt_buff_lock);
Antonio Quartullia70a9aa2013-07-30 22:16:24 +0200856 spin_lock_init(&orig_node->tt_lock);
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200857 spin_lock_init(&orig_node->vlan_list_lock);
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000858
Martin Hundebølld56b1702013-01-25 11:12:39 +0100859 batadv_nc_init_orig(orig_node);
860
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000861 /* extra reference for return */
862 atomic_set(&orig_node->refcount, 2);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000863
Marek Lindner16b1aba2011-01-19 20:01:42 +0000864 orig_node->bat_priv = bat_priv;
Antonio Quartulli8fdd0152014-01-22 00:42:11 +0100865 ether_addr_copy(orig_node->orig, addr);
Antonio Quartulli785ea112011-11-23 11:35:44 +0100866 batadv_dat_init_orig_node_addr(orig_node);
Antonio Quartullic8c991b2011-07-07 01:40:57 +0200867 atomic_set(&orig_node->last_ttvn, 0);
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200868 orig_node->tt_buff = NULL;
Antonio Quartullia73105b2011-04-27 14:27:44 +0200869 orig_node->tt_buff_len = 0;
Linus Lüssing2c667a32014-10-30 06:23:40 +0100870 orig_node->last_seen = jiffies;
Sven Eckelmann42d0b042012-06-03 22:19:17 +0200871 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
872 orig_node->bcast_seqno_reset = reset_time;
Linus Lüssing8a4023c2015-06-16 17:10:26 +0200873
Linus Lüssing60432d72014-02-15 17:47:51 +0100874#ifdef CONFIG_BATMAN_ADV_MCAST
875 orig_node->mcast_flags = BATADV_NO_FLAGS;
Linus Lüssing8a4023c2015-06-16 17:10:26 +0200876 INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
877 INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
878 INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
879 spin_lock_init(&orig_node->mcast_handler_lock);
Linus Lüssing60432d72014-02-15 17:47:51 +0100880#endif
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000881
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200882 /* create a vlan object for the "untagged" LAN */
883 vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
884 if (!vlan)
885 goto free_orig_node;
886 /* batadv_orig_node_vlan_new() increases the refcounter.
887 * Immediately release vlan since it is not needed anymore in this
888 * context
889 */
890 batadv_orig_node_vlan_free_ref(vlan);
891
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +0200892 for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
893 INIT_HLIST_HEAD(&orig_node->fragments[i].head);
894 spin_lock_init(&orig_node->fragments[i].lock);
895 orig_node->fragments[i].size = 0;
896 }
897
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000898 return orig_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000899free_orig_node:
900 kfree(orig_node);
901 return NULL;
902}
903
Simon Wunderlich89652332013-11-13 19:14:46 +0100904/**
Simon Wunderlich709de132014-03-26 15:46:24 +0100905 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
906 * @bat_priv: the bat priv with all the soft interface information
 907 * @neigh: neighbor node which is to be checked
908 */
909static void
910batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
911 struct batadv_neigh_node *neigh)
912{
913 struct batadv_neigh_ifinfo *neigh_ifinfo;
914 struct batadv_hard_iface *if_outgoing;
915 struct hlist_node *node_tmp;
916
917 spin_lock_bh(&neigh->ifinfo_lock);
918
 919 /* for all ifinfo objects of this neighbor */
920 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
921 &neigh->ifinfo_list, list) {
922 if_outgoing = neigh_ifinfo->if_outgoing;
923
924 /* always keep the default interface */
925 if (if_outgoing == BATADV_IF_DEFAULT)
926 continue;
927
928 /* don't purge if the interface is not (going) down */
929 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
930 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
931 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
932 continue;
933
934 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
935 "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
936 neigh->addr, if_outgoing->net_dev->name);
937
938 hlist_del_rcu(&neigh_ifinfo->list);
939 batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
940 }
941
942 spin_unlock_bh(&neigh->ifinfo_lock);
943}
944
945/**
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100946 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
947 * @bat_priv: the bat priv with all the soft interface information
948 * @orig_node: orig node which is to be checked
949 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200950 * Return: true if any ifinfo entry was purged, false otherwise.
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100951 */
952static bool
953batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
954 struct batadv_orig_node *orig_node)
955{
956 struct batadv_orig_ifinfo *orig_ifinfo;
957 struct batadv_hard_iface *if_outgoing;
958 struct hlist_node *node_tmp;
959 bool ifinfo_purged = false;
960
961 spin_lock_bh(&orig_node->neigh_list_lock);
962
963 /* for all ifinfo objects for this originator */
964 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
965 &orig_node->ifinfo_list, list) {
966 if_outgoing = orig_ifinfo->if_outgoing;
967
968 /* always keep the default interface */
969 if (if_outgoing == BATADV_IF_DEFAULT)
970 continue;
971
972 /* don't purge if the interface is not (going) down */
973 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
974 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
975 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
976 continue;
977
978 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
979 "router/ifinfo purge: originator %pM, iface: %s\n",
980 orig_node->orig, if_outgoing->net_dev->name);
981
982 ifinfo_purged = true;
983
984 hlist_del_rcu(&orig_ifinfo->list);
985 batadv_orig_ifinfo_free_ref(orig_ifinfo);
Simon Wunderlichf3b3d902013-11-13 19:14:50 +0100986 if (orig_node->last_bonding_candidate == orig_ifinfo) {
987 orig_node->last_bonding_candidate = NULL;
988 batadv_orig_ifinfo_free_ref(orig_ifinfo);
989 }
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100990 }
991
992 spin_unlock_bh(&orig_node->neigh_list_lock);
993
994 return ifinfo_purged;
995}
996
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100997/**
Simon Wunderlich89652332013-11-13 19:14:46 +0100998 * batadv_purge_orig_neighbors - purges neighbors from originator
999 * @bat_priv: the bat priv with all the soft interface information
1000 * @orig_node: orig node which is to be checked
1001 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001002 * Return: true if any neighbor was purged, false otherwise
Simon Wunderlich89652332013-11-13 19:14:46 +01001003 */
Sven Eckelmann56303d32012-06-05 22:31:31 +02001004static bool
1005batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
Simon Wunderlich89652332013-11-13 19:14:46 +01001006 struct batadv_orig_node *orig_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001007{
Sasha Levinb67bfe02013-02-27 17:06:00 -08001008 struct hlist_node *node_tmp;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001009 struct batadv_neigh_node *neigh_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001010 bool neigh_purged = false;
Marek Lindner0b0094e2012-03-01 15:35:20 +08001011 unsigned long last_seen;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001012 struct batadv_hard_iface *if_incoming;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001013
Marek Lindnerf987ed62010-12-12 21:57:12 +00001014 spin_lock_bh(&orig_node->neigh_list_lock);
1015
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001016 /* for all neighbors towards this originator ... */
Sasha Levinb67bfe02013-02-27 17:06:00 -08001017 hlist_for_each_entry_safe(neigh_node, node_tmp,
Marek Lindner9591a792010-12-12 21:57:11 +00001018 &orig_node->neigh_list, list) {
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001019 last_seen = neigh_node->last_seen;
1020 if_incoming = neigh_node->if_incoming;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001021
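 /* remove the neighbor if it has not been seen for BATADV_PURGE_TIMEOUT
  * or its incoming interface is being deactivated or removed
  */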
Sven Eckelmann42d0b042012-06-03 22:19:17 +02001022 if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
Sven Eckelmanne9a4f292012-06-03 22:19:19 +02001023 (if_incoming->if_status == BATADV_IF_INACTIVE) ||
1024 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
1025 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
Sven Eckelmanne9a4f292012-06-03 22:19:19 +02001026 if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
1027 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
1028 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
Sven Eckelmann39c75a52012-06-03 22:19:22 +02001029 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001030 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
1031 orig_node->orig, neigh_node->addr,
1032 if_incoming->net_dev->name);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001033 else
Sven Eckelmann39c75a52012-06-03 22:19:22 +02001034 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001035 "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
1036 orig_node->orig, neigh_node->addr,
1037 jiffies_to_msecs(last_seen));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001038
1039 neigh_purged = true;
Marek Lindner9591a792010-12-12 21:57:11 +00001040
Marek Lindnerf987ed62010-12-12 21:57:12 +00001041 hlist_del_rcu(&neigh_node->list);
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001042 batadv_neigh_node_free_ref(neigh_node);
Simon Wunderlich709de132014-03-26 15:46:24 +01001043 } else {
 1044 /* only necessary if the whole neighbor is not being
 1045 * deleted, but an interface has been removed
1046 */
1047 batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001048 }
1049 }
Marek Lindnerf987ed62010-12-12 21:57:12 +00001050
1051 spin_unlock_bh(&orig_node->neigh_list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001052 return neigh_purged;
1053}
1054
Simon Wunderlich89652332013-11-13 19:14:46 +01001055/**
1056 * batadv_find_best_neighbor - finds the best neighbor after purging
1057 * @bat_priv: the bat priv with all the soft interface information
1058 * @orig_node: orig node which is to be checked
1059 * @if_outgoing: the interface for which the metric should be compared
1060 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001061 * Return: the current best neighbor, with refcount increased.
Simon Wunderlich89652332013-11-13 19:14:46 +01001062 */
1063static struct batadv_neigh_node *
1064batadv_find_best_neighbor(struct batadv_priv *bat_priv,
1065 struct batadv_orig_node *orig_node,
1066 struct batadv_hard_iface *if_outgoing)
1067{
1068 struct batadv_neigh_node *best = NULL, *neigh;
1069 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
1070
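 /* let the routing algorithm rank all neighbors for this outgoing
  * interface and keep a reference on the best one
  */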
1071 rcu_read_lock();
1072 hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
1073 if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
1074 best, if_outgoing) <= 0))
1075 continue;
1076
1077 if (!atomic_inc_not_zero(&neigh->refcount))
1078 continue;
1079
1080 if (best)
1081 batadv_neigh_node_free_ref(best);
1082
1083 best = neigh;
1084 }
1085 rcu_read_unlock();
1086
1087 return best;
1088}
1089
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001090/**
1091 * batadv_purge_orig_node - purges obsolete information from an orig_node
1092 * @bat_priv: the bat priv with all the soft interface information
1093 * @orig_node: orig node which is to be checked
1094 *
1095 * This function checks if the orig_node or substructures of it have become
1096 * obsolete, and purges this information if that's the case.
1097 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001098 * Return: true if the orig_node is to be removed, false otherwise.
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001099 */
Sven Eckelmann56303d32012-06-05 22:31:31 +02001100static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
1101 struct batadv_orig_node *orig_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001102{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001103 struct batadv_neigh_node *best_neigh_node;
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001104 struct batadv_hard_iface *hard_iface;
Simon Wunderlich7b955a92014-03-26 15:46:23 +01001105 bool changed_ifinfo, changed_neigh;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001106
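 /* the originator itself is only purged after twice the neighbor purge
  * timeout has expired
  */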
Sven Eckelmann42d0b042012-06-03 22:19:17 +02001107 if (batadv_has_timed_out(orig_node->last_seen,
1108 2 * BATADV_PURGE_TIMEOUT)) {
Sven Eckelmann39c75a52012-06-03 22:19:22 +02001109 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001110 "Originator timeout: originator %pM, last_seen %u\n",
1111 orig_node->orig,
1112 jiffies_to_msecs(orig_node->last_seen));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001113 return true;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001114 }
Simon Wunderlich7b955a92014-03-26 15:46:23 +01001115 changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
1116 changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001117
Simon Wunderlich7b955a92014-03-26 15:46:23 +01001118 if (!changed_ifinfo && !changed_neigh)
Simon Wunderlich89652332013-11-13 19:14:46 +01001119 return false;
1120
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001121 /* first for NULL ... */
Simon Wunderlich89652332013-11-13 19:14:46 +01001122 best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
1123 BATADV_IF_DEFAULT);
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001124 batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
1125 best_neigh_node);
Simon Wunderlich89652332013-11-13 19:14:46 +01001126 if (best_neigh_node)
1127 batadv_neigh_node_free_ref(best_neigh_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001128
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001129 /* ... then for all other interfaces. */
1130 rcu_read_lock();
1131 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
1132 if (hard_iface->if_status != BATADV_IF_ACTIVE)
1133 continue;
1134
1135 if (hard_iface->soft_iface != bat_priv->soft_iface)
1136 continue;
1137
1138 best_neigh_node = batadv_find_best_neighbor(bat_priv,
1139 orig_node,
1140 hard_iface);
1141 batadv_update_route(bat_priv, orig_node, hard_iface,
1142 best_neigh_node);
1143 if (best_neigh_node)
1144 batadv_neigh_node_free_ref(best_neigh_node);
1145 }
1146 rcu_read_unlock();
1147
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001148 return false;
1149}
1150
Sven Eckelmann56303d32012-06-05 22:31:31 +02001151static void _batadv_purge_orig(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001152{
Sven Eckelmann5bf74e92012-06-05 22:31:28 +02001153 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001154 struct hlist_node *node_tmp;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001155 struct hlist_head *head;
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001156 spinlock_t *list_lock; /* spinlock to protect write access */
Sven Eckelmann56303d32012-06-05 22:31:31 +02001157 struct batadv_orig_node *orig_node;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001158 u32 i;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001159
1160 if (!hash)
1161 return;
1162
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001163 /* for all origins... */
1164 for (i = 0; i < hash->size; i++) {
1165 head = &hash->table[i];
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001166 list_lock = &hash->list_locks[i];
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001167
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001168 spin_lock_bh(list_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001169 hlist_for_each_entry_safe(orig_node, node_tmp,
Marek Lindner7aadf882011-02-18 12:28:09 +00001170 head, hash_entry) {
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001171 if (batadv_purge_orig_node(bat_priv, orig_node)) {
Marek Lindner414254e2013-04-23 21:39:58 +08001172 batadv_gw_node_delete(bat_priv, orig_node);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001173 hlist_del_rcu(&orig_node->hash_entry);
Linus Lüssing9d31b3c2014-12-13 23:32:15 +01001174 batadv_tt_global_del_orig(orig_node->bat_priv,
1175 orig_node, -1,
1176 "originator timed out");
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001177 batadv_orig_node_free_ref(orig_node);
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001178 continue;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001179 }
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +02001180
1181 batadv_frag_purge_orig(orig_node,
1182 batadv_frag_check_entry);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001183 }
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001184 spin_unlock_bh(list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001185 }
1186
Sven Eckelmann7cf06bc2012-05-12 02:09:29 +02001187 batadv_gw_election(bat_priv);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001188}
1189
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001190static void batadv_purge_orig(struct work_struct *work)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001191{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001192 struct delayed_work *delayed_work;
1193 struct batadv_priv *bat_priv;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001194
Sven Eckelmann56303d32012-06-05 22:31:31 +02001195 delayed_work = container_of(work, struct delayed_work, work);
1196 bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001197 _batadv_purge_orig(bat_priv);
Antonio Quartulli72414442012-12-25 13:14:37 +01001198 queue_delayed_work(batadv_event_workqueue,
1199 &bat_priv->orig_work,
1200 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001201}
1202
Sven Eckelmann56303d32012-06-05 22:31:31 +02001203void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001204{
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001205 _batadv_purge_orig(bat_priv);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001206}
1207
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001208int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001209{
1210 struct net_device *net_dev = (struct net_device *)seq->private;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001211 struct batadv_priv *bat_priv = netdev_priv(net_dev);
Sven Eckelmann56303d32012-06-05 22:31:31 +02001212 struct batadv_hard_iface *primary_if;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001213
Marek Lindner30da63a2012-08-03 17:15:46 +02001214 primary_if = batadv_seq_print_text_primary_if_get(seq);
1215 if (!primary_if)
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001216 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001217
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001218 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
Sven Eckelmann42d0b042012-06-03 22:19:17 +02001219 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001220 primary_if->net_dev->dev_addr, net_dev->name,
1221 bat_priv->bat_algo_ops->name);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001222
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001223 batadv_hardif_free_ref(primary_if);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001224
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001225 if (!bat_priv->bat_algo_ops->bat_orig_print) {
1226 seq_puts(seq,
1227 "No printing function for this routing protocol\n");
1228 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001229 }
1230
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001231 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
1232 BATADV_IF_DEFAULT);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001233
Marek Lindner30da63a2012-08-03 17:15:46 +02001234 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001235}
1236
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001237/**
1238 * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
1239 * outgoing interface
1240 * @seq: debugfs table seq_file struct
1241 * @offset: not used
1242 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001243 * Return: 0
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001244 */
1245int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
1246{
1247 struct net_device *net_dev = (struct net_device *)seq->private;
1248 struct batadv_hard_iface *hard_iface;
1249 struct batadv_priv *bat_priv;
1250
1251 hard_iface = batadv_hardif_get_by_netdev(net_dev);
1252
1253 if (!hard_iface || !hard_iface->soft_iface) {
1254 seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
1255 goto out;
1256 }
1257
1258 bat_priv = netdev_priv(hard_iface->soft_iface);
1259 if (!bat_priv->bat_algo_ops->bat_orig_print) {
1260 seq_puts(seq,
1261 "No printing function for this routing protocol\n");
1262 goto out;
1263 }
1264
1265 if (hard_iface->if_status != BATADV_IF_ACTIVE) {
1266 seq_puts(seq, "Interface not active\n");
1267 goto out;
1268 }
1269
1270 seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
1271 BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
1272 hard_iface->net_dev->dev_addr,
1273 hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);
1274
1275 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
1276
1277out:
Marek Lindner16a41422014-04-24 03:44:25 +08001278 if (hard_iface)
1279 batadv_hardif_free_ref(hard_iface);
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001280 return 0;
1281}
1282
Sven Eckelmann56303d32012-06-05 22:31:31 +02001283int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
1284 int max_if_num)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001285{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001286 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
Antonio Quartullid0015fd2013-09-03 11:10:23 +02001287 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
Sven Eckelmann5bf74e92012-06-05 22:31:28 +02001288 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001289 struct hlist_head *head;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001290 struct batadv_orig_node *orig_node;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001291 u32 i;
Antonio Quartullic90681b2011-10-05 17:05:25 +02001292 int ret;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001293
1294 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001295 * if_num
1296 */
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001297 for (i = 0; i < hash->size; i++) {
1298 head = &hash->table[i];
1299
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001300 rcu_read_lock();
Sasha Levinb67bfe02013-02-27 17:06:00 -08001301 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
Antonio Quartullid0015fd2013-09-03 11:10:23 +02001302 ret = 0;
1303 if (bao->bat_orig_add_if)
1304 ret = bao->bat_orig_add_if(orig_node,
1305 max_if_num);
Sven Eckelmann5346c352012-05-05 13:27:28 +02001306 if (ret == -ENOMEM)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001307 goto err;
1308 }
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001309 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001310 }
1311
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001312 return 0;
1313
1314err:
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001315 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001316 return -ENOMEM;
1317}
1318
Sven Eckelmann56303d32012-06-05 22:31:31 +02001319int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
1320 int max_if_num)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001321{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001322 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
Sven Eckelmann5bf74e92012-06-05 22:31:28 +02001323 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001324 struct hlist_head *head;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001325 struct batadv_hard_iface *hard_iface_tmp;
1326 struct batadv_orig_node *orig_node;
Antonio Quartullid0015fd2013-09-03 11:10:23 +02001327 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001328 u32 i;
Antonio Quartullic90681b2011-10-05 17:05:25 +02001329 int ret;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001330
1331 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001332 * if_num
1333 */
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001334 for (i = 0; i < hash->size; i++) {
1335 head = &hash->table[i];
1336
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001337 rcu_read_lock();
Sasha Levinb67bfe02013-02-27 17:06:00 -08001338 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
Antonio Quartullid0015fd2013-09-03 11:10:23 +02001339 ret = 0;
1340 if (bao->bat_orig_del_if)
1341 ret = bao->bat_orig_del_if(orig_node,
1342 max_if_num,
1343 hard_iface->if_num);
Sven Eckelmann5346c352012-05-05 13:27:28 +02001344 if (ret == -ENOMEM)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001345 goto err;
1346 }
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001347 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001348 }
1349
1350 /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
1351 rcu_read_lock();
Sven Eckelmann3193e8f2012-05-12 02:09:42 +02001352 list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
Sven Eckelmanne9a4f292012-06-03 22:19:19 +02001353 if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001354 continue;
1355
Marek Lindnere6c10f42011-02-18 12:33:20 +00001356 if (hard_iface == hard_iface_tmp)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001357 continue;
1358
Marek Lindnere6c10f42011-02-18 12:33:20 +00001359 if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001360 continue;
1361
Marek Lindnere6c10f42011-02-18 12:33:20 +00001362 if (hard_iface_tmp->if_num > hard_iface->if_num)
1363 hard_iface_tmp->if_num--;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001364 }
1365 rcu_read_unlock();
1366
Marek Lindnere6c10f42011-02-18 12:33:20 +00001367 hard_iface->if_num = -1;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001368 return 0;
1369
1370err:
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001371 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001372 return -ENOMEM;
1373}