/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"
#include "network-coding.h"
#include "fragmentation.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/* returns 1 if they are the same originator */
static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
        const void *data1 = container_of(node, struct batadv_orig_node,
                                         hash_entry);

        return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
                          unsigned short vid)
{
        struct batadv_orig_node_vlan *vlan = NULL, *tmp;

        rcu_read_lock();
        list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
                if (tmp->vid != vid)
                        continue;

                if (!atomic_inc_not_zero(&tmp->refcount))
                        continue;

                vlan = tmp;

                break;
        }
        rcu_read_unlock();

        return vlan;
}

/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
                          unsigned short vid)
{
        struct batadv_orig_node_vlan *vlan;

        spin_lock_bh(&orig_node->vlan_list_lock);

        /* first look if an object for this vid already exists */
        vlan = batadv_orig_node_vlan_get(orig_node, vid);
        if (vlan)
                goto out;

        vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
        if (!vlan)
                goto out;

        atomic_set(&vlan->refcount, 2);
        vlan->vid = vid;

        list_add_rcu(&vlan->list, &orig_node->vlan_list);

out:
        spin_unlock_bh(&orig_node->vlan_list_lock);

        return vlan;
}

/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
 *  the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
{
        if (atomic_dec_and_test(&orig_vlan->refcount))
                kfree_rcu(orig_vlan, rcu);
}

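/**
 * batadv_originator_init - initialise the originator hash and start the
 *  periodic purge worker
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Returns 0 on success or -ENOMEM otherwise.
 */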
int batadv_originator_init(struct batadv_priv *bat_priv)
{
        if (bat_priv->orig_hash)
                return 0;

        bat_priv->orig_hash = batadv_hash_new(1024);

        if (!bat_priv->orig_hash)
                goto err;

        batadv_hash_set_lock_class(bat_priv->orig_hash,
                                   &batadv_orig_hash_lock_class_key);

        INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
        queue_delayed_work(batadv_event_workqueue,
                           &bat_priv->orig_work,
                           msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

        return 0;

err:
        return -ENOMEM;
}

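/**
 * batadv_neigh_node_free_ref - decrement the neighbour refcounter and possibly
 *  free it
 * @neigh_node: the neigh node to free
 */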
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
        if (atomic_dec_and_test(&neigh_node->refcount))
                kfree_rcu(neigh_node, rcu);
}

/* increases the refcounter of a found router */
struct batadv_neigh_node *
batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
{
        struct batadv_neigh_node *router;

        rcu_read_lock();
        router = rcu_dereference(orig_node->router);

        if (router && !atomic_inc_not_zero(&router->refcount))
                router = NULL;

        rcu_read_unlock();
        return router;
}

/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @hard_iface: the interface the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 * @orig_node: originator object representing the neighbour
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 * Returns the new object or NULL on failure.
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
                      const uint8_t *neigh_addr,
                      struct batadv_orig_node *orig_node)
{
        struct batadv_neigh_node *neigh_node;

        neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
        if (!neigh_node)
                goto out;

        INIT_HLIST_NODE(&neigh_node->list);

        memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
        neigh_node->if_incoming = hard_iface;
        neigh_node->orig_node = orig_node;

        INIT_LIST_HEAD(&neigh_node->bonding_list);

        /* extra reference for return */
        atomic_set(&neigh_node->refcount, 2);

out:
        return neigh_node;
}

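/**
 * batadv_orig_node_free_rcu - free an orig_node and all its neighbour
 *  references once the rcu grace period has elapsed
 * @rcu: rcu pointer of the orig_node to free
 */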
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
        struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
        struct batadv_orig_node *orig_node;

        orig_node = container_of(rcu, struct batadv_orig_node, rcu);

        spin_lock_bh(&orig_node->neigh_list_lock);

        /* for all bonding members ... */
        list_for_each_entry_safe(neigh_node, tmp_neigh_node,
                                 &orig_node->bond_list, bonding_list) {
                list_del_rcu(&neigh_node->bonding_list);
                batadv_neigh_node_free_ref(neigh_node);
        }

        /* for all neighbors towards this originator ... */
        hlist_for_each_entry_safe(neigh_node, node_tmp,
                                  &orig_node->neigh_list, list) {
                hlist_del_rcu(&neigh_node->list);
                batadv_neigh_node_free_ref(neigh_node);
        }

        spin_unlock_bh(&orig_node->neigh_list_lock);

        /* Free nc_nodes */
        batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

        batadv_frag_purge_orig(orig_node, NULL);

        batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
                                  "originator timed out");

        kfree(orig_node->tt_buff);
        kfree(orig_node->bcast_own);
        kfree(orig_node->bcast_own_sum);
        kfree(orig_node);
}

/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 *  schedule an rcu callback for freeing it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
        if (atomic_dec_and_test(&orig_node->refcount))
                call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
 *  possibly free it (without rcu callback)
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
{
        if (atomic_dec_and_test(&orig_node->refcount))
                batadv_orig_node_free_rcu(&orig_node->rcu);
}

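/**
 * batadv_originator_free - stop the purge worker and free the originator hash
 * @bat_priv: the bat priv with all the soft interface information
 */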
void batadv_originator_free(struct batadv_priv *bat_priv)
{
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* spinlock to protect write access */
        struct batadv_orig_node *orig_node;
        uint32_t i;

        if (!hash)
                return;

        cancel_delayed_work_sync(&bat_priv->orig_work);

        bat_priv->orig_hash = NULL;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(orig_node, node_tmp,
                                          head, hash_entry) {
                        hlist_del_rcu(&orig_node->hash_entry);
                        batadv_orig_node_free_ref(orig_node);
                }
                spin_unlock_bh(list_lock);
        }

        batadv_hash_destroy(hash);
}

/* this function finds or creates an originator entry for the given
 * address if it does not exist
 */
struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
                                              const uint8_t *addr)
{
        struct batadv_orig_node *orig_node;
        struct batadv_orig_node_vlan *vlan;
        int size, i;
        int hash_added;
        unsigned long reset_time;

        orig_node = batadv_orig_hash_find(bat_priv, addr);
        if (orig_node)
                return orig_node;

        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                   "Creating new originator: %pM\n", addr);

        orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
        if (!orig_node)
                return NULL;

        INIT_HLIST_HEAD(&orig_node->neigh_list);
        INIT_LIST_HEAD(&orig_node->bond_list);
        INIT_LIST_HEAD(&orig_node->vlan_list);
        spin_lock_init(&orig_node->ogm_cnt_lock);
        spin_lock_init(&orig_node->bcast_seqno_lock);
        spin_lock_init(&orig_node->neigh_list_lock);
        spin_lock_init(&orig_node->tt_buff_lock);
        spin_lock_init(&orig_node->tt_lock);
        spin_lock_init(&orig_node->vlan_list_lock);

        batadv_nc_init_orig(orig_node);

        /* extra reference for return */
        atomic_set(&orig_node->refcount, 2);

        orig_node->tt_initialised = false;
        orig_node->bat_priv = bat_priv;
        memcpy(orig_node->orig, addr, ETH_ALEN);
        batadv_dat_init_orig_node_addr(orig_node);
        orig_node->router = NULL;
        atomic_set(&orig_node->last_ttvn, 0);
        orig_node->tt_buff = NULL;
        orig_node->tt_buff_len = 0;
        reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
        orig_node->bcast_seqno_reset = reset_time;
        orig_node->batman_seqno_reset = reset_time;

        atomic_set(&orig_node->bond_candidates, 0);

        /* create a vlan object for the "untagged" LAN */
        vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
        if (!vlan)
                goto free_orig_node;
        /* batadv_orig_node_vlan_new() increases the refcounter.
         * Immediately release vlan since it is not needed anymore in this
         * context
         */
        batadv_orig_node_vlan_free_ref(vlan);

        size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;

        orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
        if (!orig_node->bcast_own)
                goto free_vlan;

        size = bat_priv->num_ifaces * sizeof(uint8_t);
        orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);

        for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
                INIT_HLIST_HEAD(&orig_node->fragments[i].head);
                spin_lock_init(&orig_node->fragments[i].lock);
                orig_node->fragments[i].size = 0;
        }

        if (!orig_node->bcast_own_sum)
                goto free_bcast_own;

        hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
                                     batadv_choose_orig, orig_node,
                                     &orig_node->hash_entry);
        if (hash_added != 0)
                goto free_bcast_own_sum;

        return orig_node;
free_bcast_own_sum:
        kfree(orig_node->bcast_own_sum);
free_bcast_own:
        kfree(orig_node->bcast_own);
free_vlan:
        batadv_orig_node_vlan_free_ref(vlan);
free_orig_node:
        kfree(orig_node);
        return NULL;
}

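/**
 * batadv_purge_orig_neighbors - purge timed out or disabled neighbours of an
 *  originator and pick the best remaining one
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: the originator whose neighbour list is checked
 * @best_neigh_node: pointer used to return the remaining neighbour with the
 *  highest TQ value (NULL if none is left)
 *
 * Returns true if at least one neighbour was purged, false otherwise.
 */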
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
                            struct batadv_orig_node *orig_node,
                            struct batadv_neigh_node **best_neigh_node)
{
        struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node;
        bool neigh_purged = false;
        unsigned long last_seen;
        struct batadv_hard_iface *if_incoming;
        uint8_t best_metric = 0;

        *best_neigh_node = NULL;

        spin_lock_bh(&orig_node->neigh_list_lock);

        /* for all neighbors towards this originator ... */
        hlist_for_each_entry_safe(neigh_node, node_tmp,
                                  &orig_node->neigh_list, list) {
                last_seen = neigh_node->last_seen;
                if_incoming = neigh_node->if_incoming;

                if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
                    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
                    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
                    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
                        if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
                            (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
                            (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
                                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                                           "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
                                           orig_node->orig, neigh_node->addr,
                                           if_incoming->net_dev->name);
                        else
                                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                                           "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
                                           orig_node->orig, neigh_node->addr,
                                           jiffies_to_msecs(last_seen));

                        neigh_purged = true;

                        hlist_del_rcu(&neigh_node->list);
                        batadv_bonding_candidate_del(orig_node, neigh_node);
                        batadv_neigh_node_free_ref(neigh_node);
                } else {
                        if ((!*best_neigh_node) ||
                            (neigh_node->bat_iv.tq_avg > best_metric)) {
                                *best_neigh_node = neigh_node;
                                best_metric = neigh_node->bat_iv.tq_avg;
                        }
                }
        }

        spin_unlock_bh(&orig_node->neigh_list_lock);
        return neigh_purged;
}

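/**
 * batadv_purge_orig_node - check whether an originator has to be removed
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: the originator to check
 *
 * Purges stale neighbours and, if needed, updates the route towards the best
 * remaining one. Returns true if the originator itself timed out and should
 * be deleted, false otherwise.
 */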
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
                                   struct batadv_orig_node *orig_node)
{
        struct batadv_neigh_node *best_neigh_node;

        if (batadv_has_timed_out(orig_node->last_seen,
                                 2 * BATADV_PURGE_TIMEOUT)) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Originator timeout: originator %pM, last_seen %u\n",
                           orig_node->orig,
                           jiffies_to_msecs(orig_node->last_seen));
                return true;
        } else {
                if (batadv_purge_orig_neighbors(bat_priv, orig_node,
                                                &best_neigh_node))
                        batadv_update_route(bat_priv, orig_node,
                                            best_neigh_node);
        }

        return false;
}

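/**
 * _batadv_purge_orig - walk the originator hash and remove timed out entries
 * @bat_priv: the bat priv with all the soft interface information
 */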
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* spinlock to protect write access */
        struct batadv_orig_node *orig_node;
        uint32_t i;

        if (!hash)
                return;

        /* for all origins... */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(orig_node, node_tmp,
                                          head, hash_entry) {
                        if (batadv_purge_orig_node(bat_priv, orig_node)) {
                                batadv_gw_node_delete(bat_priv, orig_node);
                                hlist_del_rcu(&orig_node->hash_entry);
                                batadv_orig_node_free_ref(orig_node);
                                continue;
                        }

                        batadv_frag_purge_orig(orig_node,
                                               batadv_frag_check_entry);
                }
                spin_unlock_bh(list_lock);
        }

        batadv_gw_node_purge(bat_priv);
        batadv_gw_election(bat_priv);
}

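/**
 * batadv_purge_orig - periodic originator purge worker
 * @work: work queue item containing the delayed originator purge work
 *
 * Purges the originator table and re-queues itself after
 * BATADV_ORIG_WORK_PERIOD milliseconds.
 */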
static void batadv_purge_orig(struct work_struct *work)
{
        struct delayed_work *delayed_work;
        struct batadv_priv *bat_priv;

        delayed_work = container_of(work, struct delayed_work, work);
        bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
        _batadv_purge_orig(bat_priv);
        queue_delayed_work(batadv_event_workqueue,
                           &bat_priv->orig_work,
                           msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}

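/**
 * batadv_purge_orig_ref - trigger an immediate originator purge
 * @bat_priv: the bat priv with all the soft interface information
 */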
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
        _batadv_purge_orig(bat_priv);
}

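/**
 * batadv_orig_seq_print_text - print the originator table to a debugfs seq
 *  file
 * @seq: the seq file to print on
 * @offset: not used in this function (required by the seq_file interface)
 *
 * Always returns 0.
 */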
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_head *head;
        struct batadv_hard_iface *primary_if;
        struct batadv_orig_node *orig_node;
        struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
        int batman_count = 0;
        int last_seen_secs;
        int last_seen_msecs;
        unsigned long last_seen_jiffies;
        uint32_t i;

        primary_if = batadv_seq_print_text_primary_if_get(seq);
        if (!primary_if)
                goto out;

        seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
                   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
                   primary_if->net_dev->dev_addr, net_dev->name);
        seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
                   "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
                   "Nexthop", "outgoingIF", "Potential nexthops");

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        neigh_node = batadv_orig_node_get_router(orig_node);
                        if (!neigh_node)
                                continue;

                        if (neigh_node->bat_iv.tq_avg == 0)
                                goto next;

                        last_seen_jiffies = jiffies - orig_node->last_seen;
                        last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
                        last_seen_secs = last_seen_msecs / 1000;
                        last_seen_msecs = last_seen_msecs % 1000;

                        seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
                                   orig_node->orig, last_seen_secs,
                                   last_seen_msecs, neigh_node->bat_iv.tq_avg,
                                   neigh_node->addr,
                                   neigh_node->if_incoming->net_dev->name);

                        hlist_for_each_entry_rcu(neigh_node_tmp,
                                                 &orig_node->neigh_list, list) {
                                seq_printf(seq, " %pM (%3i)",
                                           neigh_node_tmp->addr,
                                           neigh_node_tmp->bat_iv.tq_avg);
                        }

                        seq_puts(seq, "\n");
                        batman_count++;

next:
                        batadv_neigh_node_free_ref(neigh_node);
                }
                rcu_read_unlock();
        }

        if (batman_count == 0)
                seq_puts(seq, "No batman nodes in range ...\n");

out:
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
        return 0;
}

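/**
 * batadv_orig_node_add_if - grow the per-interface OGM counters of an
 *  originator after a new hard interface was added
 * @orig_node: the originator to resize
 * @max_if_num: the new number of interfaces
 *
 * Returns 0 on success or -ENOMEM if the buffers could not be reallocated.
 */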
static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
                                   int max_if_num)
{
        void *data_ptr;
        size_t data_size, old_size;

        data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
        old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
        data_ptr = kmalloc(data_size, GFP_ATOMIC);
        if (!data_ptr)
                return -ENOMEM;

        memcpy(data_ptr, orig_node->bcast_own, old_size);
        kfree(orig_node->bcast_own);
        orig_node->bcast_own = data_ptr;

        data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
        if (!data_ptr)
                return -ENOMEM;

        memcpy(data_ptr, orig_node->bcast_own_sum,
               (max_if_num - 1) * sizeof(uint8_t));
        kfree(orig_node->bcast_own_sum);
        orig_node->bcast_own_sum = data_ptr;

        return 0;
}

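/**
 * batadv_orig_hash_add_if - resize the per-interface buffers of all
 *  originators after a new hard interface was added
 * @hard_iface: the hard interface that was added
 * @max_if_num: the new number of interfaces
 *
 * Returns 0 on success or -ENOMEM otherwise.
 */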
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_head *head;
        struct batadv_orig_node *orig_node;
        uint32_t i;
        int ret;

        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num
         */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        spin_lock_bh(&orig_node->ogm_cnt_lock);
                        ret = batadv_orig_node_add_if(orig_node, max_if_num);
                        spin_unlock_bh(&orig_node->ogm_cnt_lock);

                        if (ret == -ENOMEM)
                                goto err;
                }
                rcu_read_unlock();
        }

        return 0;

err:
        rcu_read_unlock();
        return -ENOMEM;
}

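/**
 * batadv_orig_node_del_if - shrink the per-interface OGM counters of an
 *  originator after a hard interface was removed
 * @orig_node: the originator to resize
 * @max_if_num: the number of interfaces left after the removal
 * @del_if_num: the index of the interface being removed
 *
 * Returns 0 on success or -ENOMEM if the buffers could not be reallocated.
 */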
static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node,
                                   int max_if_num, int del_if_num)
{
        void *data_ptr = NULL;
        int chunk_size;

        /* last interface was removed */
        if (max_if_num == 0)
                goto free_bcast_own;

        chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
        data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
        if (!data_ptr)
                return -ENOMEM;

        /* copy first part */
        memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);

        /* copy second part */
        memcpy((char *)data_ptr + del_if_num * chunk_size,
               orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
               (max_if_num - del_if_num) * chunk_size);

free_bcast_own:
        kfree(orig_node->bcast_own);
        orig_node->bcast_own = data_ptr;

        if (max_if_num == 0)
                goto free_own_sum;

        data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
        if (!data_ptr)
                return -ENOMEM;

        memcpy(data_ptr, orig_node->bcast_own_sum,
               del_if_num * sizeof(uint8_t));

        memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
               orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
               (max_if_num - del_if_num) * sizeof(uint8_t));

free_own_sum:
        kfree(orig_node->bcast_own_sum);
        orig_node->bcast_own_sum = data_ptr;

        return 0;
}

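/**
 * batadv_orig_hash_del_if - resize the per-interface buffers of all
 *  originators and renumber the remaining interfaces after a hard interface
 *  was removed
 * @hard_iface: the hard interface being removed
 * @max_if_num: the number of interfaces left after the removal
 *
 * Returns 0 on success or -ENOMEM otherwise.
 */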
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_head *head;
        struct batadv_hard_iface *hard_iface_tmp;
        struct batadv_orig_node *orig_node;
        uint32_t i;
        int ret;

        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num
         */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        spin_lock_bh(&orig_node->ogm_cnt_lock);
                        ret = batadv_orig_node_del_if(orig_node, max_if_num,
                                                      hard_iface->if_num);
                        spin_unlock_bh(&orig_node->ogm_cnt_lock);

                        if (ret == -ENOMEM)
                                goto err;
                }
                rcu_read_unlock();
        }

        /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
                if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
                        continue;

                if (hard_iface == hard_iface_tmp)
                        continue;

                if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
                        continue;

                if (hard_iface_tmp->if_num > hard_iface->if_num)
                        hard_iface_tmp->if_num--;
        }
        rcu_read_unlock();

        hard_iface->if_num = -1;
        return 0;

err:
        rcu_read_unlock();
        return -ENOMEM;
}