/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"
#include "network-coding.h"
#include "fragmentation.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/* returns 1 if they are the same originator */
int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		if (tmp->vid != vid)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		vlan = tmp;

		break;
	}
	rcu_read_unlock();

	return vlan;
}

/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (vlan)
		goto out;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		goto out;

	atomic_set(&vlan->refcount, 2);
	vlan->vid = vid;

	list_add_rcu(&vlan->list, &orig_node->vlan_list);

out:
	spin_unlock_bh(&orig_node->vlan_list_lock);

	return vlan;
}

/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
 *  the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
{
	if (atomic_dec_and_test(&orig_vlan->refcount))
		kfree_rcu(orig_vlan, rcu);
}

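/**
 * batadv_originator_init - initialise the originator hash of a soft interface
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Allocates the originator hash table, sets its lock class and schedules the
 * periodic originator purge work. Returns 0 on success (or if the hash
 * already exists) and -ENOMEM if the hash table could not be allocated.
 */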
int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}

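/**
 * batadv_neigh_node_free_ref - decrement the neigh node refcounter and
 *  possibly release it (via kfree_rcu) once the last reference is dropped
 * @neigh_node: the neigh node to free
 */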
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		kfree_rcu(neigh_node, rcu);
}

/* increases the refcounter of a found router */
struct batadv_neigh_node *
batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *router;

	rcu_read_lock();
	router = rcu_dereference(orig_node->router);

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}

/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @hard_iface: the interface where the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 * @orig_node: originator object representing the neighbour
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 * Returns the new object or NULL on failure.
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
		      const uint8_t *neigh_addr,
		      struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *neigh_node;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	INIT_HLIST_NODE(&neigh_node->list);

	memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
	neigh_node->if_incoming = hard_iface;
	neigh_node->orig_node = orig_node;

	INIT_LIST_HEAD(&neigh_node->bonding_list);

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

out:
	return neigh_node;
}

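/**
 * batadv_orig_node_free_rcu - release all the resources attached to an
 *  orig_node and free it
 * @rcu: the rcu_head embedded in the orig_node to free
 *
 * RCU callback cleaning up the bonding and neighbour lists, the network
 * coding and fragmentation state, the global translation table entries and
 * the routing-algorithm private data before freeing the orig_node itself.
 */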
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all bonding members ... */
	list_for_each_entry_safe(neigh_node, tmp_neigh_node,
				 &orig_node->bond_list, bonding_list) {
		list_del_rcu(&neigh_node->bonding_list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	batadv_frag_purge_orig(orig_node, NULL);

	batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
				  "originator timed out");

	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

	kfree(orig_node->tt_buff);
	kfree(orig_node);
}

/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 *  schedule an rcu callback for freeing it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
 *  possibly free it (without rcu callback)
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		batadv_orig_node_free_rcu(&orig_node->rcu);
}

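/**
 * batadv_originator_free - free all the originators of a soft interface
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Cancels the periodic purge work, removes every orig_node from the hash and
 * drops the hash reference on each of them, then destroys the hash itself.
 */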
void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialises all the generic fields.
 * The new object is not added to the originator list.
 * Returns the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
					      const uint8_t *addr)
{
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;
	int i;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->bond_list);
	INIT_LIST_HEAD(&orig_node->vlan_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->tt_initialised = false;
	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	batadv_dat_init_orig_node_addr(orig_node);
	orig_node->router = NULL;
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;
	orig_node->batman_seqno_reset = reset_time;

	atomic_set(&orig_node->bond_candidates, 0);

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		goto free_orig_node;
	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_free_ref(vlan);

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
	}

	return orig_node;
free_orig_node:
	kfree(orig_node);
	return NULL;
}

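/**
 * batadv_purge_orig_neighbors - purge the outdated neighbours of an originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: the originator whose neighbour list has to be checked
 * @best_neigh: pointer where the best remaining neighbour is stored
 *
 * Removes every neighbour that timed out or whose incoming interface is no
 * longer usable and, among the surviving ones, selects the best neighbour
 * according to the routing algorithm in use.
 *
 * Returns true if at least one neighbour has been purged, false otherwise.
 */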
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node,
			    struct batadv_neigh_node **best_neigh)
{
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	*best_neigh = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_bonding_candidate_del(orig_node, neigh_node);
			batadv_neigh_node_free_ref(neigh_node);
		} else {
			/* store the best_neighbour if this is the first
			 * iteration or if a better neighbor has been found
			 */
			if (!*best_neigh ||
			    bao->bat_neigh_cmp(neigh_node, *best_neigh) > 0)
				*best_neigh = neigh_node;
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

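/**
 * batadv_purge_orig_node - check whether an originator has to be purged
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: the originator to check
 *
 * Returns true if the originator itself timed out and can be removed. If it
 * is still alive, its neighbour list is purged instead and, if needed, the
 * route towards it is updated to the best remaining neighbour; in this case
 * false is returned.
 */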
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	} else {
		if (batadv_purge_orig_neighbors(bat_priv, orig_node,
						&best_neigh_node))
			batadv_update_route(bat_priv, orig_node,
					    best_neigh_node);
	}

	return false;
}

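/**
 * _batadv_purge_orig - purge outdated originators from the originator hash
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Walks the whole originator hash, deletes the originators that timed out
 * (together with their gateway entries) and purges the stale fragment
 * buffers of the remaining ones. Finally the gateway list is cleaned up and
 * a new gateway election is triggered.
 */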
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				batadv_gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			batadv_frag_purge_orig(orig_node,
					       batadv_frag_check_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_node_purge(bat_priv);
	batadv_gw_election(bat_priv);
}

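/**
 * batadv_purge_orig - periodic originator purge work
 * @work: work queue item embedded in bat_priv->orig_work
 *
 * Worker callback which purges the originator hash and re-queues itself
 * every BATADV_ORIG_WORK_PERIOD milliseconds.
 */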
static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}

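/**
 * batadv_purge_orig_ref - trigger an immediate purge of the originator hash
 * @bat_priv: the bat priv with all the soft interface information
 */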
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}

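/**
 * batadv_orig_seq_print_text - print the originator table in a seq file
 * @seq: seq file to print on
 * @offset: not used
 *
 * Prints a header describing the primary interface and then delegates the
 * actual table output to the bat_orig_print() handler of the routing
 * algorithm in use. Always returns 0.
 */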
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_free_ref(primary_if);

	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq);

	return 0;
}

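/**
 * batadv_orig_hash_add_if - notify the originators about a new interface
 * @hard_iface: the hard interface that was added
 * @max_if_num: the new number of interfaces
 *
 * Asks the routing algorithm (if it implements bat_orig_add_if) to resize the
 * per-interface data of every orig_node. Returns 0 on success and -ENOMEM if
 * one of the resize operations fails.
 */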
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_add_if)
				ret = bao->bat_orig_add_if(orig_node,
							   max_if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

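/**
 * batadv_orig_hash_del_if - notify the originators about an interface removal
 * @hard_iface: the hard interface that is going away
 * @max_if_num: the new number of interfaces
 *
 * Asks the routing algorithm (if it implements bat_orig_del_if) to shrink the
 * per-interface data of every orig_node and renumbers the remaining hard
 * interfaces belonging to the same soft interface. Returns 0 on success and
 * -ENOMEM if one of the resize operations fails.
 */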
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_del_if)
				ret = bao->bat_orig_del_if(orig_node,
							   max_if_num,
							   hard_iface->if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}