/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "originator.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "hash.h"
#include "multicast.h"
#include "network-coding.h"
#include "routing.h"
#include "translation-table.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/**
 * batadv_compare_orig - comparing function used in the originator hash table
 * @node: node in the local table
 * @data2: second object to compare the node to
 *
 * Return: 1 if they are the same originator
 */
int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return batadv_compare_eth(data1, data2);
}

/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Return: the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		if (tmp->vid != vid)
			continue;

		if (!kref_get_unless_zero(&tmp->refcount))
			continue;

		vlan = tmp;

		break;
	}
	rcu_read_unlock();

	return vlan;
}

/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Return: NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (vlan)
		goto out;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		goto out;

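	/* extra reference for return, the first one is kept by the list */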
	kref_init(&vlan->refcount);
	kref_get(&vlan->refcount);
	vlan->vid = vid;

	hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);

out:
	spin_unlock_bh(&orig_node->vlan_list_lock);

	return vlan;
}

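/* Usage sketch: a typical caller pairs the lookup/creation with a matching
 * reference release once the object is no longer needed, e.g.:
 *
 *	vlan = batadv_orig_node_vlan_new(orig_node, vid);
 *	if (!vlan)
 *		return -ENOMEM;
 *	...
 *	batadv_orig_node_vlan_free_ref(vlan);
 */
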
/**
 * batadv_orig_node_vlan_release - release originator-vlan object from lists
 *  and queue for free after rcu grace period
 * @ref: kref pointer of the originator-vlan object
 */
static void batadv_orig_node_vlan_release(struct kref *ref)
{
	struct batadv_orig_node_vlan *orig_vlan;

	orig_vlan = container_of(ref, struct batadv_orig_node_vlan, refcount);

	kfree_rcu(orig_vlan, rcu);
}

/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly
 *  release the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
{
	kref_put(&orig_vlan->refcount, batadv_orig_node_vlan_release);
}

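/**
 * batadv_originator_init - allocate the originator hash table and schedule
 *  the periodic originator purge worker
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: 0 on success or -ENOMEM in case of failure
 */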
int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}

/**
 * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the neigh_ifinfo
 */
static void batadv_neigh_ifinfo_release(struct kref *ref)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	neigh_ifinfo = container_of(ref, struct batadv_neigh_ifinfo, refcount);

	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_put(neigh_ifinfo->if_outgoing);

	kfree_rcu(neigh_ifinfo, rcu);
}

/**
 * batadv_neigh_ifinfo_put - decrement the refcounter and possibly release
 *  the neigh_ifinfo
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
void batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	kref_put(&neigh_ifinfo->refcount, batadv_neigh_ifinfo_release);
}

/**
 * batadv_hardif_neigh_release - release hardif neigh node from lists and
 *  queue for free after rcu grace period
 * @ref: kref pointer of the neigh_node
 */
static void batadv_hardif_neigh_release(struct kref *ref)
{
	struct batadv_hardif_neigh_node *hardif_neigh;

	hardif_neigh = container_of(ref, struct batadv_hardif_neigh_node,
				    refcount);

	spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
	hlist_del_init_rcu(&hardif_neigh->list);
	spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);

	batadv_hardif_put(hardif_neigh->if_incoming);
	kfree_rcu(hardif_neigh, rcu);
}

/**
 * batadv_hardif_neigh_free_ref - decrement the hardif neighbors refcounter
 *  and possibly release it
 * @hardif_neigh: hardif neigh neighbor to free
 */
void batadv_hardif_neigh_free_ref(struct batadv_hardif_neigh_node *hardif_neigh)
{
	kref_put(&hardif_neigh->refcount, batadv_hardif_neigh_release);
}

/**
 * batadv_neigh_node_release - release neigh_node from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the neigh_node
 */
static void batadv_neigh_node_release(struct kref *ref)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_hardif_neigh_node *hardif_neigh;
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_algo_ops *bao;

	neigh_node = container_of(ref, struct batadv_neigh_node, refcount);
	bao = neigh_node->orig_node->bat_priv->bat_algo_ops;

	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh_node->ifinfo_list, list) {
		batadv_neigh_ifinfo_put(neigh_ifinfo);
	}

	hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
					       neigh_node->addr);
	if (hardif_neigh) {
		/* batadv_hardif_neigh_get() increases refcount too */
		batadv_hardif_neigh_free_ref(hardif_neigh);
		batadv_hardif_neigh_free_ref(hardif_neigh);
	}

	if (bao->bat_neigh_free)
		bao->bat_neigh_free(neigh_node);

	batadv_hardif_put(neigh_node->if_incoming);

	kfree_rcu(neigh_node, rcu);
}

/**
 * batadv_neigh_node_put - decrement the neighbors refcounter and possibly
 *  release it
 * @neigh_node: neigh neighbor to free
 */
void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node)
{
	kref_put(&neigh_node->refcount, batadv_neigh_node_release);
}

/**
 * batadv_orig_router_get - router to the originator depending on iface
 * @orig_node: the orig node for the router
 * @if_outgoing: the interface where the payload packet has been received or
 *  the OGM should be sent to
 *
 * Return: the neighbor which should be router for this orig_node/iface.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_node *
batadv_orig_router_get(struct batadv_orig_node *orig_node,
		       const struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
		if (orig_ifinfo->if_outgoing != if_outgoing)
			continue;

		router = rcu_dereference(orig_ifinfo->router);
		break;
	}

	if (router && !kref_get_unless_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}

/**
 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Return: the requested orig_ifinfo or NULL if not found.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
				 list) {
		if (tmp->if_outgoing != if_outgoing)
			continue;

		if (!kref_get_unless_zero(&tmp->refcount))
			continue;

		orig_ifinfo = tmp;
		break;
	}
	rcu_read_unlock();

	return orig_ifinfo;
}

/**
 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Return: NULL in case of failure or the orig_ifinfo object for the if_outgoing
 * interface otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
	unsigned long reset_time;

	spin_lock_bh(&orig_node->neigh_list_lock);

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
	if (orig_ifinfo)
		goto out;

	orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
	if (!orig_ifinfo)
		goto out;

	if (if_outgoing != BATADV_IF_DEFAULT &&
	    !kref_get_unless_zero(&if_outgoing->refcount)) {
		kfree(orig_ifinfo);
		orig_ifinfo = NULL;
		goto out;
	}

	reset_time = jiffies - 1;
	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_ifinfo->batman_seqno_reset = reset_time;
	orig_ifinfo->if_outgoing = if_outgoing;
	INIT_HLIST_NODE(&orig_ifinfo->list);
	kref_init(&orig_ifinfo->refcount);
	kref_get(&orig_ifinfo->refcount);
	hlist_add_head_rcu(&orig_ifinfo->list,
			   &orig_node->ifinfo_list);
out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return orig_ifinfo;
}

/**
 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * The object is returned with refcounter increased by 1.
 *
 * Return: the requested neigh_ifinfo or NULL if not found
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
				   *tmp_neigh_ifinfo;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
				 list) {
		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
			continue;

		if (!kref_get_unless_zero(&tmp_neigh_ifinfo->refcount))
			continue;

		neigh_ifinfo = tmp_neigh_ifinfo;
		break;
	}
	rcu_read_unlock();

	return neigh_ifinfo;
}

/**
 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Return: NULL in case of failure or the neigh_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	spin_lock_bh(&neigh->ifinfo_lock);

	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
	if (neigh_ifinfo)
		goto out;

	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
	if (!neigh_ifinfo)
		goto out;

	if (if_outgoing && !kref_get_unless_zero(&if_outgoing->refcount)) {
		kfree(neigh_ifinfo);
		neigh_ifinfo = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_ifinfo->list);
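	/* extra reference for return, the first one is kept by the list */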
	kref_init(&neigh_ifinfo->refcount);
	kref_get(&neigh_ifinfo->refcount);
	neigh_ifinfo->if_outgoing = if_outgoing;

	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);

out:
	spin_unlock_bh(&neigh->ifinfo_lock);

	return neigh_ifinfo;
}

/**
 * batadv_neigh_node_get - retrieve a neighbour from the list
 * @orig_node: originator which the neighbour belongs to
 * @hard_iface: the interface where this neighbour is connected to
 * @addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this originator list
 * which is connected through the provided hard interface.
 *
 * Return: neighbor when found. Otherwise NULL
 */
static struct batadv_neigh_node *
batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
		      const struct batadv_hard_iface *hard_iface,
		      const u8 *addr)
{
	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
		if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
			continue;

		if (tmp_neigh_node->if_incoming != hard_iface)
			continue;

		if (!kref_get_unless_zero(&tmp_neigh_node->refcount))
			continue;

		res = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	return res;
}

/**
 * batadv_hardif_neigh_create - create a hardif neighbour node
 * @hard_iface: the interface this neighbour is connected to
 * @neigh_addr: the interface address of the neighbour to retrieve
 *
 * Return: the hardif neighbour node if found or created or NULL otherwise.
 */
static struct batadv_hardif_neigh_node *
batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
			   const u8 *neigh_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hardif_neigh_node *hardif_neigh = NULL;

	spin_lock_bh(&hard_iface->neigh_list_lock);

	/* check if neighbor hasn't been added in the meantime */
	hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
	if (hardif_neigh)
		goto out;

	if (!kref_get_unless_zero(&hard_iface->refcount))
		goto out;

	hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
	if (!hardif_neigh) {
		batadv_hardif_put(hard_iface);
		goto out;
	}

	INIT_HLIST_NODE(&hardif_neigh->list);
	ether_addr_copy(hardif_neigh->addr, neigh_addr);
	hardif_neigh->if_incoming = hard_iface;
	hardif_neigh->last_seen = jiffies;

	kref_init(&hardif_neigh->refcount);

	if (bat_priv->bat_algo_ops->bat_hardif_neigh_init)
		bat_priv->bat_algo_ops->bat_hardif_neigh_init(hardif_neigh);

	hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);

out:
	spin_unlock_bh(&hard_iface->neigh_list_lock);
	return hardif_neigh;
}

/**
 * batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
 *  node
 * @hard_iface: the interface this neighbour is connected to
 * @neigh_addr: the interface address of the neighbour to retrieve
 *
 * Return: the hardif neighbour node if found or created or NULL otherwise.
 */
static struct batadv_hardif_neigh_node *
batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
				  const u8 *neigh_addr)
{
	struct batadv_hardif_neigh_node *hardif_neigh = NULL;

	/* first check without locking to avoid the overhead */
	hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
	if (hardif_neigh)
		return hardif_neigh;

	return batadv_hardif_neigh_create(hard_iface, neigh_addr);
}

/**
 * batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
 * @hard_iface: the interface where this neighbour is connected to
 * @neigh_addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this hard interface.
 *
 * Return: neighbor when found. Otherwise NULL
 */
struct batadv_hardif_neigh_node *
batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
			const u8 *neigh_addr)
{
	struct batadv_hardif_neigh_node *tmp_hardif_neigh, *hardif_neigh = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_hardif_neigh,
				 &hard_iface->neigh_list, list) {
		if (!batadv_compare_eth(tmp_hardif_neigh->addr, neigh_addr))
			continue;

		if (!kref_get_unless_zero(&tmp_hardif_neigh->refcount))
			continue;

		hardif_neigh = tmp_hardif_neigh;
		break;
	}
	rcu_read_unlock();

	return hardif_neigh;
}

/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @orig_node: originator object representing the neighbour
 * @hard_iface: the interface where the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 *
 * Return: the neighbour object if found or created. Otherwise NULL
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_orig_node *orig_node,
		      struct batadv_hard_iface *hard_iface,
		      const u8 *neigh_addr)
{
	struct batadv_neigh_node *neigh_node;
	struct batadv_hardif_neigh_node *hardif_neigh = NULL;

	neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
	if (neigh_node)
		goto out;

	hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface,
							 neigh_addr);
	if (!hardif_neigh)
		goto out;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	if (!kref_get_unless_zero(&hard_iface->refcount)) {
		kfree(neigh_node);
		neigh_node = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
	spin_lock_init(&neigh_node->ifinfo_lock);

	ether_addr_copy(neigh_node->addr, neigh_addr);
	neigh_node->if_incoming = hard_iface;
	neigh_node->orig_node = orig_node;

	/* extra reference for return */
	kref_init(&neigh_node->refcount);
	kref_get(&neigh_node->refcount);

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* increment unique neighbor refcount */
	kref_get(&hardif_neigh->refcount);

	batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);

out:
	if (hardif_neigh)
		batadv_hardif_neigh_free_ref(hardif_neigh);
	return neigh_node;
}

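/* Reference handling sketch: a routing algorithm that looks up or creates a
 * neighbour entry is expected to drop the reference returned above once the
 * object is no longer needed, e.g.:
 *
 *	neigh_node = batadv_neigh_node_new(orig_node, hard_iface, addr);
 *	if (!neigh_node)
 *		goto out;
 *	...
 *	batadv_neigh_node_put(neigh_node);
 */
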
/**
 * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
 * @seq: neighbour table seq_file struct
 * @offset: not used
 *
 * Return: always 0
 */
int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_put(primary_if);

	if (!bat_priv->bat_algo_ops->bat_neigh_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_neigh_print(bat_priv, seq);
	return 0;
}

/**
 * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the orig_ifinfo
 */
static void batadv_orig_ifinfo_release(struct kref *ref)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router;

	orig_ifinfo = container_of(ref, struct batadv_orig_ifinfo, refcount);

	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_put(orig_ifinfo->if_outgoing);

	/* this is the last reference to this object */
	router = rcu_dereference_protected(orig_ifinfo->router, true);
	if (router)
		batadv_neigh_node_put(router);

	kfree_rcu(orig_ifinfo, rcu);
}

/**
 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
 *  the orig_ifinfo
 * @orig_ifinfo: the orig_ifinfo object to release
 */
void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
{
	kref_put(&orig_ifinfo->refcount, batadv_orig_ifinfo_release);
}

/**
 * batadv_orig_node_free_rcu - free the orig_node
 * @rcu: rcu pointer of the orig_node
 */
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	batadv_mcast_purge_orig(orig_node);

	batadv_frag_purge_orig(orig_node, NULL);

	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

	kfree(orig_node->tt_buff);
	kfree(orig_node);
}

/**
 * batadv_orig_node_release - release orig_node from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the orig_node
 */
static void batadv_orig_node_release(struct kref *ref)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_orig_node *orig_node;
	struct batadv_orig_ifinfo *orig_ifinfo;

	orig_node = container_of(ref, struct batadv_orig_node, refcount);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_put(neigh_node);
	}

	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
	}
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_put - decrement the orig node refcounter and possibly
 *  release it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_put(struct batadv_orig_node *orig_node)
{
	kref_put(&orig_node->refcount, batadv_orig_node_release);
}

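/**
 * batadv_originator_free - cancel the purge worker and free all originators
 *  together with the originator hash table
 * @bat_priv: the bat priv with all the soft interface information
 */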
void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_put(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialises all the generic fields.
 * The new object is not added to the originator list.
 *
 * Return: the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
					      const u8 *addr)
{
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;
	int i;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_HLIST_HEAD(&orig_node->vlan_list);
	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	kref_init(&orig_node->refcount);
	kref_get(&orig_node->refcount);

	orig_node->bat_priv = bat_priv;
	ether_addr_copy(orig_node->orig, addr);
	batadv_dat_init_orig_node_addr(orig_node);
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	orig_node->last_seen = jiffies;
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;

#ifdef CONFIG_BATMAN_ADV_MCAST
	orig_node->mcast_flags = BATADV_NO_FLAGS;
	INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
	spin_lock_init(&orig_node->mcast_handler_lock);
#endif

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		goto free_orig_node;
	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_free_ref(vlan);

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
	}

	return orig_node;
free_orig_node:
	kfree(orig_node);
	return NULL;
}

/**
 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
 * @bat_priv: the bat priv with all the soft interface information
 * @neigh: the neigh node whose ifinfo entries are to be checked
 */
static void
batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
			  struct batadv_neigh_node *neigh)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;

	spin_lock_bh(&neigh->ifinfo_lock);

	/* for all ifinfo objects for this neighbor */
	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh->ifinfo_list, list) {
		if_outgoing = neigh_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
			   neigh->addr, if_outgoing->net_dev->name);

		hlist_del_rcu(&neigh_ifinfo->list);
		batadv_neigh_ifinfo_put(neigh_ifinfo);
	}

	spin_unlock_bh(&neigh->ifinfo_lock);
}

/**
 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Return: true if any ifinfo entry was purged, false otherwise.
 */
static bool
batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;
	bool ifinfo_purged = false;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all ifinfo objects for this originator */
	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		if_outgoing = orig_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "router/ifinfo purge: originator %pM, iface: %s\n",
			   orig_node->orig, if_outgoing->net_dev->name);

		ifinfo_purged = true;

		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
		if (orig_node->last_bonding_candidate == orig_ifinfo) {
			orig_node->last_bonding_candidate = NULL;
			batadv_orig_ifinfo_free_ref(orig_ifinfo);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	return ifinfo_purged;
}

/**
 * batadv_purge_orig_neighbors - purges neighbors from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Return: true if any neighbor was purged, false otherwise
 */
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_neigh_node_put(neigh_node);
		} else {
			/* only necessary if not the whole neighbor is to be
			 * deleted, but some interface has been removed.
			 */
			batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

/**
 * batadv_find_best_neighbor - finds the best neighbor after purging
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 * @if_outgoing: the interface for which the metric should be compared
 *
 * Return: the current best neighbor, with refcount increased.
 */
static struct batadv_neigh_node *
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
			  struct batadv_orig_node *orig_node,
			  struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_node *best = NULL, *neigh;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;

	rcu_read_lock();
	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
		if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
						best, if_outgoing) <= 0))
			continue;

		if (!kref_get_unless_zero(&neigh->refcount))
			continue;

		if (best)
			batadv_neigh_node_put(best);

		best = neigh;
	}
	rcu_read_unlock();

	return best;
}

/**
 * batadv_purge_orig_node - purges obsolete information from an orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * This function checks if the orig_node or substructures of it have become
 * obsolete, and purges this information if that's the case.
 *
 * Return: true if the orig_node is to be removed, false otherwise.
 */
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;
	struct batadv_hard_iface *hard_iface;
	bool changed_ifinfo, changed_neigh;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	}
	changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
	changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);

	if (!changed_ifinfo && !changed_neigh)
		return false;

	/* first for NULL ... */
	best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
						    BATADV_IF_DEFAULT);
	batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
			    best_neigh_node);
	if (best_neigh_node)
		batadv_neigh_node_put(best_neigh_node);

	/* ... then for all other interfaces. */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		best_neigh_node = batadv_find_best_neighbor(bat_priv,
							    orig_node,
							    hard_iface);
		batadv_update_route(bat_priv, orig_node, hard_iface,
				    best_neigh_node);
		if (best_neigh_node)
			batadv_neigh_node_put(best_neigh_node);
	}
	rcu_read_unlock();

	return false;
}

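/**
 * _batadv_purge_orig - walk the originator hash and purge stale entries
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Removes originators which have timed out or whose interfaces vanished and
 * afterwards triggers a new gateway election.
 */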
Sven Eckelmann56303d32012-06-05 22:31:31 +02001181static void _batadv_purge_orig(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001182{
Sven Eckelmann5bf74e92012-06-05 22:31:28 +02001183 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001184 struct hlist_node *node_tmp;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001185 struct hlist_head *head;
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001186 spinlock_t *list_lock; /* spinlock to protect write access */
Sven Eckelmann56303d32012-06-05 22:31:31 +02001187 struct batadv_orig_node *orig_node;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001188 u32 i;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001189
1190 if (!hash)
1191 return;
1192
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001193	/* for all originators ... */
1194 for (i = 0; i < hash->size; i++) {
1195 head = &hash->table[i];
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001196 list_lock = &hash->list_locks[i];
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001197
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001198 spin_lock_bh(list_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001199 hlist_for_each_entry_safe(orig_node, node_tmp,
Marek Lindner7aadf882011-02-18 12:28:09 +00001200 head, hash_entry) {
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001201 if (batadv_purge_orig_node(bat_priv, orig_node)) {
Marek Lindner414254e2013-04-23 21:39:58 +08001202 batadv_gw_node_delete(bat_priv, orig_node);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001203 hlist_del_rcu(&orig_node->hash_entry);
Linus Lüssing9d31b3c2014-12-13 23:32:15 +01001204 batadv_tt_global_del_orig(orig_node->bat_priv,
1205 orig_node, -1,
1206 "originator timed out");
Sven Eckelmann5d967312016-01-17 11:01:09 +01001207 batadv_orig_node_put(orig_node);
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001208 continue;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001209 }
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +02001210
1211 batadv_frag_purge_orig(orig_node,
1212 batadv_frag_check_entry);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001213 }
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001214 spin_unlock_bh(list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001215 }
1216
Sven Eckelmann7cf06bc2012-05-12 02:09:29 +02001217 batadv_gw_election(bat_priv);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001218}
1219
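/**
 * batadv_purge_orig - periodic originator purge worker
 * @work: work member of the delayed work bat_priv->orig_work
 *
 * Purges the originator table and re-queues itself so the purge runs again
 * after BATADV_ORIG_WORK_PERIOD milliseconds.
 */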
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001220static void batadv_purge_orig(struct work_struct *work)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001221{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001222 struct delayed_work *delayed_work;
1223 struct batadv_priv *bat_priv;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001224
Sven Eckelmann56303d32012-06-05 22:31:31 +02001225 delayed_work = container_of(work, struct delayed_work, work);
1226 bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001227 _batadv_purge_orig(bat_priv);
Antonio Quartulli72414442012-12-25 13:14:37 +01001228 queue_delayed_work(batadv_event_workqueue,
1229 &bat_priv->orig_work,
1230 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001231}
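/* Arming sketch (an assumption; the setup code is not part of this section):
 * the delayed work above is presumably initialized once during originator
 * setup, roughly like
 *
 *	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
 *	queue_delayed_work(batadv_event_workqueue, &bat_priv->orig_work,
 *			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
 *
 * after which batadv_purge_orig() keeps re-queueing itself as seen above.
 */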
1232
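/**
 * batadv_purge_orig_ref - trigger an immediate originator purge
 * @bat_priv: the bat priv with all the soft interface information
 */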
Sven Eckelmann56303d32012-06-05 22:31:31 +02001233void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001234{
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001235 _batadv_purge_orig(bat_priv);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001236}
1237
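/**
 * batadv_orig_seq_print_text - print the originator table for the default
 *  interface via the routing algorithm's originator printer
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Return: 0
 */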
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001238int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001239{
1240 struct net_device *net_dev = (struct net_device *)seq->private;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001241 struct batadv_priv *bat_priv = netdev_priv(net_dev);
Sven Eckelmann56303d32012-06-05 22:31:31 +02001242 struct batadv_hard_iface *primary_if;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001243
Marek Lindner30da63a2012-08-03 17:15:46 +02001244 primary_if = batadv_seq_print_text_primary_if_get(seq);
1245 if (!primary_if)
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001246 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001247
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001248 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
Sven Eckelmann42d0b042012-06-03 22:19:17 +02001249 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001250 primary_if->net_dev->dev_addr, net_dev->name,
1251 bat_priv->bat_algo_ops->name);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001252
Sven Eckelmann82047ad2016-01-17 11:01:10 +01001253 batadv_hardif_put(primary_if);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001254
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001255 if (!bat_priv->bat_algo_ops->bat_orig_print) {
1256 seq_puts(seq,
1257 "No printing function for this routing protocol\n");
1258 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001259 }
1260
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001261 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
1262 BATADV_IF_DEFAULT);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001263
Marek Lindner30da63a2012-08-03 17:15:46 +02001264 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001265}
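/* Usage sketch (an assumption; the debugfs wiring lives outside this
 * section): a seq_file handler with this signature is typically hooked up
 * via
 *
 *	single_open(file, batadv_orig_seq_print_text, net_dev);
 *
 * which is what makes seq->private carry the soft interface's net_device,
 * as the cast at the top of the function expects.
 */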
1266
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001267/**
1268 * batadv_orig_hardif_seq_print_text - writes originator information for a specific
1269 * outgoing interface
1270 * @seq: debugfs table seq_file struct
1271 * @offset: not used
1272 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001273 * Return: 0
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001274 */
1275int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
1276{
1277 struct net_device *net_dev = (struct net_device *)seq->private;
1278 struct batadv_hard_iface *hard_iface;
1279 struct batadv_priv *bat_priv;
1280
1281 hard_iface = batadv_hardif_get_by_netdev(net_dev);
1282
1283 if (!hard_iface || !hard_iface->soft_iface) {
1284 seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
1285 goto out;
1286 }
1287
1288 bat_priv = netdev_priv(hard_iface->soft_iface);
1289 if (!bat_priv->bat_algo_ops->bat_orig_print) {
1290 seq_puts(seq,
1291 "No printing function for this routing protocol\n");
1292 goto out;
1293 }
1294
1295 if (hard_iface->if_status != BATADV_IF_ACTIVE) {
1296 seq_puts(seq, "Interface not active\n");
1297 goto out;
1298 }
1299
1300 seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
1301 BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
1302 hard_iface->net_dev->dev_addr,
1303 hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);
1304
1305 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
1306
1307out:
Marek Lindner16a41422014-04-24 03:44:25 +08001308 if (hard_iface)
Sven Eckelmann82047ad2016-01-17 11:01:10 +01001309 batadv_hardif_put(hard_iface);
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001310 return 0;
1311}
1312
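/**
 * batadv_orig_hash_add_if - grow per-interface originator data for a newly
 *  added hard interface
 * @hard_iface: the hard interface being added
 * @max_if_num: the updated number of interfaces
 *
 * Return: 0 on success, -ENOMEM if resizing an originator fails.
 */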
Sven Eckelmann56303d32012-06-05 22:31:31 +02001313int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
1314 int max_if_num)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001315{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001316 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
Antonio Quartullid0015fd2013-09-03 11:10:23 +02001317 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
Sven Eckelmann5bf74e92012-06-05 22:31:28 +02001318 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001319 struct hlist_head *head;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001320 struct batadv_orig_node *orig_node;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001321 u32 i;
Antonio Quartullic90681b2011-10-05 17:05:25 +02001322 int ret;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001323
1324 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001325 * if_num
1326 */
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001327 for (i = 0; i < hash->size; i++) {
1328 head = &hash->table[i];
1329
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001330 rcu_read_lock();
Sasha Levinb67bfe02013-02-27 17:06:00 -08001331 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
Antonio Quartullid0015fd2013-09-03 11:10:23 +02001332 ret = 0;
1333 if (bao->bat_orig_add_if)
1334 ret = bao->bat_orig_add_if(orig_node,
1335 max_if_num);
Sven Eckelmann5346c352012-05-05 13:27:28 +02001336 if (ret == -ENOMEM)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001337 goto err;
1338 }
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001339 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001340 }
1341
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001342 return 0;
1343
1344err:
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001345 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001346 return -ENOMEM;
1347}
1348
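/**
 * batadv_orig_hash_del_if - shrink per-interface originator data after a
 *  hard interface was removed
 * @hard_iface: the hard interface being removed
 * @max_if_num: the updated number of interfaces
 *
 * Also renumbers the remaining interfaces attached to the same soft
 * interface and marks @hard_iface as unassigned (if_num = -1).
 *
 * Return: 0 on success, -ENOMEM if resizing an originator fails.
 */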
Sven Eckelmann56303d32012-06-05 22:31:31 +02001349int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
1350 int max_if_num)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001351{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001352 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
Sven Eckelmann5bf74e92012-06-05 22:31:28 +02001353 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001354 struct hlist_head *head;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001355 struct batadv_hard_iface *hard_iface_tmp;
1356 struct batadv_orig_node *orig_node;
Antonio Quartullid0015fd2013-09-03 11:10:23 +02001357 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001358 u32 i;
Antonio Quartullic90681b2011-10-05 17:05:25 +02001359 int ret;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001360
1361 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001362 * if_num
1363 */
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001364 for (i = 0; i < hash->size; i++) {
1365 head = &hash->table[i];
1366
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001367 rcu_read_lock();
Sasha Levinb67bfe02013-02-27 17:06:00 -08001368 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
Antonio Quartullid0015fd2013-09-03 11:10:23 +02001369 ret = 0;
1370 if (bao->bat_orig_del_if)
1371 ret = bao->bat_orig_del_if(orig_node,
1372 max_if_num,
1373 hard_iface->if_num);
Sven Eckelmann5346c352012-05-05 13:27:28 +02001374 if (ret == -ENOMEM)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001375 goto err;
1376 }
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001377 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001378 }
1379
1380	/* renumber the remaining batman interfaces of this soft interface */
1381 rcu_read_lock();
Sven Eckelmann3193e8f2012-05-12 02:09:42 +02001382 list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
Sven Eckelmanne9a4f292012-06-03 22:19:19 +02001383 if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001384 continue;
1385
Marek Lindnere6c10f42011-02-18 12:33:20 +00001386 if (hard_iface == hard_iface_tmp)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001387 continue;
1388
Marek Lindnere6c10f42011-02-18 12:33:20 +00001389 if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001390 continue;
1391
Marek Lindnere6c10f42011-02-18 12:33:20 +00001392 if (hard_iface_tmp->if_num > hard_iface->if_num)
1393 hard_iface_tmp->if_num--;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001394 }
1395 rcu_read_unlock();
1396
Marek Lindnere6c10f42011-02-18 12:33:20 +00001397 hard_iface->if_num = -1;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001398 return 0;
1399
1400err:
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001401 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001402 return -ENOMEM;
1403}