Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001/*
Sven Eckelmann567db7b2012-01-01 00:41:38 +01002 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00003 *
Antonio Quartulli35c133a2012-03-14 13:03:01 +01004 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00005 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#include "main.h"
23#include "translation-table.h"
24#include "soft-interface.h"
Marek Lindner32ae9b22011-04-20 15:40:58 +020025#include "hard-interface.h"
Antonio Quartullia73105b2011-04-27 14:27:44 +020026#include "send.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000027#include "hash.h"
28#include "originator.h"
Antonio Quartullia73105b2011-04-27 14:27:44 +020029#include "routing.h"
Simon Wunderlich20ff9d52012-01-22 20:00:23 +010030#include "bridge_loop_avoidance.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000031
Antonio Quartullia73105b2011-04-27 14:27:44 +020032#include <linux/crc16.h>
33
Sven Eckelmannde7aae62012-02-05 18:55:22 +010034static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
35 struct orig_node *orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +020036static void tt_purge(struct work_struct *work);
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +020037static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000038
Marek Lindner7aadf882011-02-18 12:28:09 +000039/* returns 1 if they are the same MAC address */
Antonio Quartulli48100ba2011-10-30 12:17:33 +010040static int compare_tt(const struct hlist_node *node, const void *data2)
Marek Lindner7aadf882011-02-18 12:28:09 +000041{
Antonio Quartulli48100ba2011-10-30 12:17:33 +010042 const void *data1 = container_of(node, struct tt_common_entry,
Sven Eckelmann747e4222011-05-14 23:14:50 +020043 hash_entry);
Marek Lindner7aadf882011-02-18 12:28:09 +000044
45 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
46}
47
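/* (re)arm the translation table maintenance work: tt_purge() is
 * scheduled to run 5 seconds from now on the batman event workqueue */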
Antonio Quartullia73105b2011-04-27 14:27:44 +020048static void tt_start_timer(struct bat_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000049{
Antonio Quartullia73105b2011-04-27 14:27:44 +020050 INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
51 queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
52 msecs_to_jiffies(5000));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000053}
54
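/* look up an entry with the given mac address in the given hash table.
 * On success the entry is returned with its refcount increased, so the
 * caller has to release it with the matching *_free_ref() helper */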
Antonio Quartulli48100ba2011-10-30 12:17:33 +010055static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
56 const void *data)
Marek Lindner7aadf882011-02-18 12:28:09 +000057{
Marek Lindner7aadf882011-02-18 12:28:09 +000058 struct hlist_head *head;
59 struct hlist_node *node;
Antonio Quartulli48100ba2011-10-30 12:17:33 +010060 struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
Antonio Quartullic90681b2011-10-05 17:05:25 +020061 uint32_t index;
Marek Lindner7aadf882011-02-18 12:28:09 +000062
63 if (!hash)
64 return NULL;
65
66 index = choose_orig(data, hash->size);
67 head = &hash->table[index];
68
69 rcu_read_lock();
Antonio Quartulli48100ba2011-10-30 12:17:33 +010070 hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
71 if (!compare_eth(tt_common_entry, data))
Marek Lindner7aadf882011-02-18 12:28:09 +000072 continue;
73
Antonio Quartulli48100ba2011-10-30 12:17:33 +010074 if (!atomic_inc_not_zero(&tt_common_entry->refcount))
Antonio Quartulli7683fdc2011-04-27 14:28:07 +020075 continue;
76
Antonio Quartulli48100ba2011-10-30 12:17:33 +010077 tt_common_entry_tmp = tt_common_entry;
Marek Lindner7aadf882011-02-18 12:28:09 +000078 break;
79 }
80 rcu_read_unlock();
81
Antonio Quartulli48100ba2011-10-30 12:17:33 +010082 return tt_common_entry_tmp;
83}
84
85static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
86 const void *data)
87{
88 struct tt_common_entry *tt_common_entry;
89 struct tt_local_entry *tt_local_entry = NULL;
90
91 tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
92 if (tt_common_entry)
93 tt_local_entry = container_of(tt_common_entry,
94 struct tt_local_entry, common);
95 return tt_local_entry;
Marek Lindner7aadf882011-02-18 12:28:09 +000096}
97
Antonio Quartulli2dafb492011-05-05 08:42:45 +020098static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
Sven Eckelmann747e4222011-05-14 23:14:50 +020099 const void *data)
Marek Lindner7aadf882011-02-18 12:28:09 +0000100{
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100101 struct tt_common_entry *tt_common_entry;
102 struct tt_global_entry *tt_global_entry = NULL;
Marek Lindner7aadf882011-02-18 12:28:09 +0000103
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100104 tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
105 if (tt_common_entry)
106 tt_global_entry = container_of(tt_common_entry,
107 struct tt_global_entry, common);
108 return tt_global_entry;
Marek Lindner7aadf882011-02-18 12:28:09 +0000109
Marek Lindner7aadf882011-02-18 12:28:09 +0000110}
111
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200112static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
113{
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100114 if (atomic_dec_and_test(&tt_local_entry->common.refcount))
115 kfree_rcu(tt_local_entry, common.rcu);
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200116}
117
Simon Wunderlich531027f2011-10-19 11:02:25 +0200118static void tt_global_entry_free_rcu(struct rcu_head *rcu)
119{
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100120 struct tt_common_entry *tt_common_entry;
Simon Wunderlich531027f2011-10-19 11:02:25 +0200121 struct tt_global_entry *tt_global_entry;
122
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100123 tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
124 tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
125 common);
Simon Wunderlich531027f2011-10-19 11:02:25 +0200126
Simon Wunderlich531027f2011-10-19 11:02:25 +0200127 kfree(tt_global_entry);
128}
129
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200130static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
131{
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200132 if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
133 tt_global_del_orig_list(tt_global_entry);
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100134 call_rcu(&tt_global_entry->common.rcu,
135 tt_global_entry_free_rcu);
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200136 }
137}
138
139static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
140{
141 struct tt_orig_list_entry *orig_entry;
142
143 orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
144 atomic_dec(&orig_entry->orig_node->tt_size);
Sven Eckelmann7d211ef2012-05-12 02:09:34 +0200145 batadv_orig_node_free_ref(orig_entry->orig_node);
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200146 kfree(orig_entry);
147}
148
149static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
150{
151 call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200152}
153
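/* queue a flag change for a local client in bat_priv->tt_changes_list;
 * the list is later flushed into the packet buffer by tt_changes_fill_buff() */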
Antonio Quartulliff66c972011-06-30 01:14:00 +0200154static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
155 uint8_t flags)
Antonio Quartullia73105b2011-04-27 14:27:44 +0200156{
157 struct tt_change_node *tt_change_node;
158
159 tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
160
161 if (!tt_change_node)
162 return;
163
Antonio Quartulliff66c972011-06-30 01:14:00 +0200164 tt_change_node->change.flags = flags;
Antonio Quartullia73105b2011-04-27 14:27:44 +0200165 memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
166
167 spin_lock_bh(&bat_priv->tt_changes_list_lock);
 168 /* track the change in the OGM interval list */
169 list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
170 atomic_inc(&bat_priv->tt_local_changes);
171 spin_unlock_bh(&bat_priv->tt_changes_list_lock);
172
173 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
174}
175
Sven Eckelmann08c36d32012-05-12 02:09:39 +0200176int batadv_tt_len(int changes_num)
Antonio Quartullia73105b2011-04-27 14:27:44 +0200177{
178 return changes_num * sizeof(struct tt_change);
179}
180
181static int tt_local_init(struct bat_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000182{
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200183 if (bat_priv->tt_local_hash)
Sven Eckelmann5346c352012-05-05 13:27:28 +0200184 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000185
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200186 bat_priv->tt_local_hash = batadv_hash_new(1024);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000187
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200188 if (!bat_priv->tt_local_hash)
Sven Eckelmann5346c352012-05-05 13:27:28 +0200189 return -ENOMEM;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000190
Sven Eckelmann5346c352012-05-05 13:27:28 +0200191 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000192}
193
Sven Eckelmann08c36d32012-05-12 02:09:39 +0200194void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
195 int ifindex)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000196{
197 struct bat_priv *bat_priv = netdev_priv(soft_iface);
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200198 struct tt_local_entry *tt_local_entry = NULL;
199 struct tt_global_entry *tt_global_entry = NULL;
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200200 struct hlist_head *head;
201 struct hlist_node *node;
202 struct tt_orig_list_entry *orig_entry;
Simon Wunderlich80b3f582011-11-02 20:26:45 +0100203 int hash_added;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000204
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200205 tt_local_entry = tt_local_hash_find(bat_priv, addr);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000206
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200207 if (tt_local_entry) {
208 tt_local_entry->last_seen = jiffies;
Antonio Quartulli521251f2012-01-16 00:36:58 +0100209 /* possibly unset the TT_CLIENT_PENDING flag */
210 tt_local_entry->common.flags &= ~TT_CLIENT_PENDING;
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200211 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000212 }
213
Sven Eckelmann704509b2011-05-14 23:14:54 +0200214 tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200215 if (!tt_local_entry)
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200216 goto out;
Antonio Quartullia73105b2011-04-27 14:27:44 +0200217
Antonio Quartullia73105b2011-04-27 14:27:44 +0200218 bat_dbg(DBG_TT, bat_priv,
219 "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
220 (uint8_t)atomic_read(&bat_priv->ttvn));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000221
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100222 memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
223 tt_local_entry->common.flags = NO_FLAGS;
Sven Eckelmann95638772012-05-12 02:09:31 +0200224 if (batadv_is_wifi_iface(ifindex))
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100225 tt_local_entry->common.flags |= TT_CLIENT_WIFI;
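	/* two references: one for the hash table, one for the local pointer
	 * released at the out: label below */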
226 atomic_set(&tt_local_entry->common.refcount, 2);
227 tt_local_entry->last_seen = jiffies;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000228
229 /* the batman interface mac address should never be purged */
Marek Lindner39901e72011-02-18 12:28:08 +0000230 if (compare_eth(addr, soft_iface->dev_addr))
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100231 tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000232
Antonio Quartullic40ed2b2012-01-06 21:31:33 +0100233 /* The local entry has to be marked as NEW to avoid sending it in
234 * a full table response going out before the next ttvn increment
235 * (consistency check) */
236 tt_local_entry->common.flags |= TT_CLIENT_NEW;
237
Simon Wunderlich80b3f582011-11-02 20:26:45 +0100238 hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
239 &tt_local_entry->common,
240 &tt_local_entry->common.hash_entry);
241
242 if (unlikely(hash_added != 0)) {
243 /* remove the reference for the hash */
244 tt_local_entry_free_ref(tt_local_entry);
245 goto out;
246 }
247
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100248 tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
Antonio Quartulliff66c972011-06-30 01:14:00 +0200249
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000250 /* remove address from global hash if present */
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200251 tt_global_entry = tt_global_hash_find(bat_priv, addr);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000252
Antonio Quartullicc47f662011-04-27 14:27:57 +0200253 /* Check whether this is a roaming client */
254 if (tt_global_entry) {
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200255 /* These nodes are probably going to update their tt table */
256 head = &tt_global_entry->orig_list;
257 rcu_read_lock();
258 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
259 orig_entry->orig_node->tt_poss_change = true;
260
261 send_roam_adv(bat_priv, tt_global_entry->common.addr,
262 orig_entry->orig_node);
263 }
264 rcu_read_unlock();
265 /* The global entry has to be marked as ROAMING and
 266 * has to be kept for consistency purposes
267 */
David S. Miller220b07e2011-12-16 15:07:28 -0500268 tt_global_entry->common.flags |= TT_CLIENT_ROAM;
Antonio Quartulli03fc3072011-12-04 12:26:50 +0100269 tt_global_entry->roam_at = jiffies;
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200270 }
271out:
272 if (tt_local_entry)
273 tt_local_entry_free_ref(tt_local_entry);
274 if (tt_global_entry)
275 tt_global_entry_free_ref(tt_global_entry);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000276}
277
Marek Lindnerbe9aa4c2012-05-07 04:22:05 +0800278static void tt_realloc_packet_buff(unsigned char **packet_buff,
279 int *packet_buff_len, int min_packet_len,
280 int new_packet_len)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000281{
Marek Lindnerbe9aa4c2012-05-07 04:22:05 +0800282 unsigned char *new_buff;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000283
Marek Lindnerbe9aa4c2012-05-07 04:22:05 +0800284 new_buff = kmalloc(new_packet_len, GFP_ATOMIC);
285
286 /* keep old buffer if kmalloc should fail */
287 if (new_buff) {
288 memcpy(new_buff, *packet_buff, min_packet_len);
289 kfree(*packet_buff);
290 *packet_buff = new_buff;
291 *packet_buff_len = new_packet_len;
292 }
293}
294
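/* grow the packet buffer so that it can hold all queued local TT changes;
 * if that would exceed the primary interface MTU, keep min_packet_len only
 * and leave the changes to a later (fragmented) TT table request */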
295static void tt_prepare_packet_buff(struct bat_priv *bat_priv,
296 unsigned char **packet_buff,
297 int *packet_buff_len, int min_packet_len)
298{
299 struct hard_iface *primary_if;
300 int req_len;
301
302 primary_if = primary_if_get_selected(bat_priv);
303
304 req_len = min_packet_len;
Sven Eckelmann08c36d32012-05-12 02:09:39 +0200305 req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes));
Marek Lindnerbe9aa4c2012-05-07 04:22:05 +0800306
 307 /* if we have too many changes for one packet, don't send any
308 * and wait for the tt table request which will be fragmented
309 */
310 if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
311 req_len = min_packet_len;
312
313 tt_realloc_packet_buff(packet_buff, packet_buff_len,
314 min_packet_len, req_len);
315
316 if (primary_if)
317 hardif_free_ref(primary_if);
318}
319
320static int tt_changes_fill_buff(struct bat_priv *bat_priv,
321 unsigned char **packet_buff,
322 int *packet_buff_len, int min_packet_len)
323{
324 struct tt_change_node *entry, *safe;
325 int count = 0, tot_changes = 0, new_len;
326 unsigned char *tt_buff;
327
328 tt_prepare_packet_buff(bat_priv, packet_buff,
329 packet_buff_len, min_packet_len);
330
331 new_len = *packet_buff_len - min_packet_len;
332 tt_buff = *packet_buff + min_packet_len;
333
334 if (new_len > 0)
Sven Eckelmann08c36d32012-05-12 02:09:39 +0200335 tot_changes = new_len / batadv_tt_len(1);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000336
Antonio Quartullia73105b2011-04-27 14:27:44 +0200337 spin_lock_bh(&bat_priv->tt_changes_list_lock);
338 atomic_set(&bat_priv->tt_local_changes, 0);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000339
Antonio Quartullia73105b2011-04-27 14:27:44 +0200340 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
Sven Eckelmann7c64fd92012-02-28 10:55:36 +0100341 list) {
Antonio Quartullia73105b2011-04-27 14:27:44 +0200342 if (count < tot_changes) {
Sven Eckelmann08c36d32012-05-12 02:09:39 +0200343 memcpy(tt_buff + batadv_tt_len(count),
Antonio Quartullia73105b2011-04-27 14:27:44 +0200344 &entry->change, sizeof(struct tt_change));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000345 count++;
346 }
Antonio Quartullia73105b2011-04-27 14:27:44 +0200347 list_del(&entry->list);
348 kfree(entry);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000349 }
Antonio Quartullia73105b2011-04-27 14:27:44 +0200350 spin_unlock_bh(&bat_priv->tt_changes_list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000351
Antonio Quartullia73105b2011-04-27 14:27:44 +0200352 /* Keep the buffer for possible tt_request */
353 spin_lock_bh(&bat_priv->tt_buff_lock);
354 kfree(bat_priv->tt_buff);
355 bat_priv->tt_buff_len = 0;
356 bat_priv->tt_buff = NULL;
Marek Lindnerbe9aa4c2012-05-07 04:22:05 +0800357 /* check whether this new OGM has no changes due to size problems */
358 if (new_len > 0) {
359 /* if kmalloc() fails we will reply with the full table
Antonio Quartullia73105b2011-04-27 14:27:44 +0200360 * instead of providing the diff
361 */
Marek Lindnerbe9aa4c2012-05-07 04:22:05 +0800362 bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
Antonio Quartullia73105b2011-04-27 14:27:44 +0200363 if (bat_priv->tt_buff) {
Marek Lindnerbe9aa4c2012-05-07 04:22:05 +0800364 memcpy(bat_priv->tt_buff, tt_buff, new_len);
365 bat_priv->tt_buff_len = new_len;
Antonio Quartullia73105b2011-04-27 14:27:44 +0200366 }
367 }
368 spin_unlock_bh(&bat_priv->tt_buff_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000369
Marek Lindner08ad76e2012-04-23 16:32:55 +0800370 return count;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000371}
372
Sven Eckelmann08c36d32012-05-12 02:09:39 +0200373int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000374{
375 struct net_device *net_dev = (struct net_device *)seq->private;
376 struct bat_priv *bat_priv = netdev_priv(net_dev);
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200377 struct hashtable_t *hash = bat_priv->tt_local_hash;
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100378 struct tt_common_entry *tt_common_entry;
Marek Lindner32ae9b22011-04-20 15:40:58 +0200379 struct hard_iface *primary_if;
Marek Lindner7aadf882011-02-18 12:28:09 +0000380 struct hlist_node *node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000381 struct hlist_head *head;
Antonio Quartullic90681b2011-10-05 17:05:25 +0200382 uint32_t i;
383 int ret = 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000384
Marek Lindner32ae9b22011-04-20 15:40:58 +0200385 primary_if = primary_if_get_selected(bat_priv);
386 if (!primary_if) {
Sven Eckelmann86ceb362012-03-07 09:07:45 +0100387 ret = seq_printf(seq,
388 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
Marek Lindner32ae9b22011-04-20 15:40:58 +0200389 net_dev->name);
390 goto out;
391 }
392
393 if (primary_if->if_status != IF_ACTIVE) {
Sven Eckelmann86ceb362012-03-07 09:07:45 +0100394 ret = seq_printf(seq,
395 "BATMAN mesh %s disabled - primary interface not active\n",
Marek Lindner32ae9b22011-04-20 15:40:58 +0200396 net_dev->name);
397 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000398 }
399
Sven Eckelmann86ceb362012-03-07 09:07:45 +0100400 seq_printf(seq,
401 "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
Antonio Quartullia73105b2011-04-27 14:27:44 +0200402 net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000403
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000404 for (i = 0; i < hash->size; i++) {
405 head = &hash->table[i];
406
Marek Lindner7aadf882011-02-18 12:28:09 +0000407 rcu_read_lock();
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100408 hlist_for_each_entry_rcu(tt_common_entry, node,
Marek Lindner7aadf882011-02-18 12:28:09 +0000409 head, hash_entry) {
Simon Wunderlichd099c2c2011-10-22 18:15:26 +0200410 seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
Sven Eckelmann7c64fd92012-02-28 10:55:36 +0100411 tt_common_entry->addr,
412 (tt_common_entry->flags &
413 TT_CLIENT_ROAM ? 'R' : '.'),
414 (tt_common_entry->flags &
415 TT_CLIENT_NOPURGE ? 'P' : '.'),
416 (tt_common_entry->flags &
417 TT_CLIENT_NEW ? 'N' : '.'),
418 (tt_common_entry->flags &
419 TT_CLIENT_PENDING ? 'X' : '.'),
420 (tt_common_entry->flags &
421 TT_CLIENT_WIFI ? 'W' : '.'));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000422 }
Marek Lindner7aadf882011-02-18 12:28:09 +0000423 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000424 }
Marek Lindner32ae9b22011-04-20 15:40:58 +0200425out:
426 if (primary_if)
427 hardif_free_ref(primary_if);
428 return ret;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000429}
430
Antonio Quartulli058d0e22011-07-07 01:40:58 +0200431static void tt_local_set_pending(struct bat_priv *bat_priv,
432 struct tt_local_entry *tt_local_entry,
Antonio Quartullic566dbb2012-01-06 21:31:34 +0100433 uint16_t flags, const char *message)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000434{
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100435 tt_local_event(bat_priv, tt_local_entry->common.addr,
436 tt_local_entry->common.flags | flags);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000437
Antonio Quartulli015758d2011-07-09 17:52:13 +0200438 /* The local client has to be marked as "pending to be removed" but has
439 * to be kept in the table in order to send it in a full table
Antonio Quartulli058d0e22011-07-07 01:40:58 +0200440 * response issued before the next ttvn increment (consistency check) */
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100441 tt_local_entry->common.flags |= TT_CLIENT_PENDING;
Antonio Quartullic566dbb2012-01-06 21:31:34 +0100442
Sven Eckelmann86ceb362012-03-07 09:07:45 +0100443 bat_dbg(DBG_TT, bat_priv,
444 "Local tt entry (%pM) pending to be removed: %s\n",
445 tt_local_entry->common.addr, message);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000446}
447
Sven Eckelmann08c36d32012-05-12 02:09:39 +0200448void batadv_tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
449 const char *message, bool roaming)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000450{
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200451 struct tt_local_entry *tt_local_entry = NULL;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000452
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200453 tt_local_entry = tt_local_hash_find(bat_priv, addr);
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200454 if (!tt_local_entry)
455 goto out;
456
Antonio Quartulli058d0e22011-07-07 01:40:58 +0200457 tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
Antonio Quartullic566dbb2012-01-06 21:31:34 +0100458 (roaming ? TT_CLIENT_ROAM : NO_FLAGS), message);
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200459out:
460 if (tt_local_entry)
461 tt_local_entry_free_ref(tt_local_entry);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000462}
463
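/* mark local clients not seen for TT_LOCAL_TIMEOUT as pending for deletion;
 * entries flagged TT_CLIENT_NOPURGE or already pending are skipped */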
Antonio Quartullia73105b2011-04-27 14:27:44 +0200464static void tt_local_purge(struct bat_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000465{
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200466 struct hashtable_t *hash = bat_priv->tt_local_hash;
467 struct tt_local_entry *tt_local_entry;
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100468 struct tt_common_entry *tt_common_entry;
Marek Lindner7aadf882011-02-18 12:28:09 +0000469 struct hlist_node *node, *node_tmp;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000470 struct hlist_head *head;
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200471 spinlock_t *list_lock; /* protects write access to the hash lists */
Antonio Quartullic90681b2011-10-05 17:05:25 +0200472 uint32_t i;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000473
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000474 for (i = 0; i < hash->size; i++) {
475 head = &hash->table[i];
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200476 list_lock = &hash->list_locks[i];
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000477
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200478 spin_lock_bh(list_lock);
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100479 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
Marek Lindner7aadf882011-02-18 12:28:09 +0000480 head, hash_entry) {
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100481 tt_local_entry = container_of(tt_common_entry,
482 struct tt_local_entry,
483 common);
484 if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
Marek Lindner7aadf882011-02-18 12:28:09 +0000485 continue;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000486
Antonio Quartulli058d0e22011-07-07 01:40:58 +0200487 /* entry already marked for deletion */
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100488 if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
Antonio Quartulli058d0e22011-07-07 01:40:58 +0200489 continue;
490
Martin Hundebølla04ccd52011-12-08 13:32:41 +0100491 if (!has_timed_out(tt_local_entry->last_seen,
Marek Lindner032b7962011-12-20 19:30:40 +0800492 TT_LOCAL_TIMEOUT))
Marek Lindner7aadf882011-02-18 12:28:09 +0000493 continue;
494
Antonio Quartulli058d0e22011-07-07 01:40:58 +0200495 tt_local_set_pending(bat_priv, tt_local_entry,
Antonio Quartullic566dbb2012-01-06 21:31:34 +0100496 TT_CLIENT_DEL, "timed out");
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000497 }
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200498 spin_unlock_bh(list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000499 }
500
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000501}
502
Antonio Quartullia73105b2011-04-27 14:27:44 +0200503static void tt_local_table_free(struct bat_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000504{
Antonio Quartullia73105b2011-04-27 14:27:44 +0200505 struct hashtable_t *hash;
Antonio Quartullia73105b2011-04-27 14:27:44 +0200506 spinlock_t *list_lock; /* protects write access to the hash lists */
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100507 struct tt_common_entry *tt_common_entry;
Antonio Quartullia73105b2011-04-27 14:27:44 +0200508 struct tt_local_entry *tt_local_entry;
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200509 struct hlist_node *node, *node_tmp;
510 struct hlist_head *head;
Antonio Quartullic90681b2011-10-05 17:05:25 +0200511 uint32_t i;
Antonio Quartullia73105b2011-04-27 14:27:44 +0200512
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200513 if (!bat_priv->tt_local_hash)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000514 return;
515
Antonio Quartullia73105b2011-04-27 14:27:44 +0200516 hash = bat_priv->tt_local_hash;
517
518 for (i = 0; i < hash->size; i++) {
519 head = &hash->table[i];
520 list_lock = &hash->list_locks[i];
521
522 spin_lock_bh(list_lock);
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100523 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
Antonio Quartullia73105b2011-04-27 14:27:44 +0200524 head, hash_entry) {
525 hlist_del_rcu(node);
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100526 tt_local_entry = container_of(tt_common_entry,
527 struct tt_local_entry,
528 common);
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200529 tt_local_entry_free_ref(tt_local_entry);
Antonio Quartullia73105b2011-04-27 14:27:44 +0200530 }
531 spin_unlock_bh(list_lock);
532 }
533
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200534 batadv_hash_destroy(hash);
Antonio Quartullia73105b2011-04-27 14:27:44 +0200535
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200536 bat_priv->tt_local_hash = NULL;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000537}
538
Antonio Quartullia73105b2011-04-27 14:27:44 +0200539static int tt_global_init(struct bat_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000540{
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200541 if (bat_priv->tt_global_hash)
Sven Eckelmann5346c352012-05-05 13:27:28 +0200542 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000543
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200544 bat_priv->tt_global_hash = batadv_hash_new(1024);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000545
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200546 if (!bat_priv->tt_global_hash)
Sven Eckelmann5346c352012-05-05 13:27:28 +0200547 return -ENOMEM;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000548
Sven Eckelmann5346c352012-05-05 13:27:28 +0200549 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000550}
551
Antonio Quartullia73105b2011-04-27 14:27:44 +0200552static void tt_changes_list_free(struct bat_priv *bat_priv)
553{
554 struct tt_change_node *entry, *safe;
555
556 spin_lock_bh(&bat_priv->tt_changes_list_lock);
557
558 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
559 list) {
560 list_del(&entry->list);
561 kfree(entry);
562 }
563
564 atomic_set(&bat_priv->tt_local_changes, 0);
565 spin_unlock_bh(&bat_priv->tt_changes_list_lock);
566}
567
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200568/* find out if an orig_node is already in the list of a tt_global_entry.
 569 * returns true if found, false otherwise
570 */
571static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
572 const struct orig_node *orig_node)
573{
574 struct tt_orig_list_entry *tmp_orig_entry;
575 const struct hlist_head *head;
576 struct hlist_node *node;
577 bool found = false;
578
579 rcu_read_lock();
580 head = &entry->orig_list;
581 hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
582 if (tmp_orig_entry->orig_node == orig_node) {
583 found = true;
584 break;
585 }
586 }
587 rcu_read_unlock();
588 return found;
589}
590
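/* add orig_node to the list of originators announcing this global client;
 * takes a reference on orig_node and increases its announced tt_size */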
591static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
592 struct orig_node *orig_node,
593 int ttvn)
594{
595 struct tt_orig_list_entry *orig_entry;
596
597 orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
598 if (!orig_entry)
599 return;
600
601 INIT_HLIST_NODE(&orig_entry->list);
602 atomic_inc(&orig_node->refcount);
603 atomic_inc(&orig_node->tt_size);
604 orig_entry->orig_node = orig_node;
605 orig_entry->ttvn = ttvn;
606
607 spin_lock_bh(&tt_global_entry->list_lock);
608 hlist_add_head_rcu(&orig_entry->list,
609 &tt_global_entry->orig_list);
610 spin_unlock_bh(&tt_global_entry->list_lock);
611}
612
Antonio Quartullia73105b2011-04-27 14:27:44 +0200613/* caller must hold orig_node refcount */
Sven Eckelmann08c36d32012-05-12 02:09:39 +0200614int batadv_tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
615 const unsigned char *tt_addr, uint8_t ttvn,
616 bool roaming, bool wifi)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000617{
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200618 struct tt_global_entry *tt_global_entry = NULL;
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200619 int ret = 0;
Simon Wunderlich80b3f582011-11-02 20:26:45 +0100620 int hash_added;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000621
Antonio Quartullia73105b2011-04-27 14:27:44 +0200622 tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000623
Antonio Quartullia73105b2011-04-27 14:27:44 +0200624 if (!tt_global_entry) {
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200625 tt_global_entry = kzalloc(sizeof(*tt_global_entry),
626 GFP_ATOMIC);
Antonio Quartullia73105b2011-04-27 14:27:44 +0200627 if (!tt_global_entry)
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200628 goto out;
629
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100630 memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200631
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100632 tt_global_entry->common.flags = NO_FLAGS;
Antonio Quartullicc47f662011-04-27 14:27:57 +0200633 tt_global_entry->roam_at = 0;
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200634 atomic_set(&tt_global_entry->common.refcount, 2);
635
636 INIT_HLIST_HEAD(&tt_global_entry->orig_list);
637 spin_lock_init(&tt_global_entry->list_lock);
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200638
Simon Wunderlich80b3f582011-11-02 20:26:45 +0100639 hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
640 choose_orig, &tt_global_entry->common,
641 &tt_global_entry->common.hash_entry);
642
643 if (unlikely(hash_added != 0)) {
644 /* remove the reference for the hash */
645 tt_global_entry_free_ref(tt_global_entry);
646 goto out_remove;
647 }
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200648
649 tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn);
Antonio Quartullia73105b2011-04-27 14:27:44 +0200650 } else {
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200651 /* there is already a global entry, use this one. */
652
653 /* If there is the TT_CLIENT_ROAM flag set, there is only one
654 * originator left in the list and we previously received a
655 * delete + roaming change for this originator.
656 *
657 * We should first delete the old originator before adding the
658 * new one.
659 */
660 if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
661 tt_global_del_orig_list(tt_global_entry);
662 tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
663 tt_global_entry->roam_at = 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000664 }
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200665
666 if (!tt_global_entry_has_orig(tt_global_entry, orig_node))
667 tt_global_add_orig_entry(tt_global_entry, orig_node,
668 ttvn);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000669 }
Antonio Quartullia73105b2011-04-27 14:27:44 +0200670
Antonio Quartullibc279082011-07-07 15:35:35 +0200671 if (wifi)
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100672 tt_global_entry->common.flags |= TT_CLIENT_WIFI;
Antonio Quartullibc279082011-07-07 15:35:35 +0200673
Antonio Quartullia73105b2011-04-27 14:27:44 +0200674 bat_dbg(DBG_TT, bat_priv,
675 "Creating new global tt entry: %pM (via %pM)\n",
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100676 tt_global_entry->common.addr, orig_node->orig);
Antonio Quartullia73105b2011-04-27 14:27:44 +0200677
Simon Wunderlich80b3f582011-11-02 20:26:45 +0100678out_remove:
Antonio Quartullia73105b2011-04-27 14:27:44 +0200679 /* remove address from local hash if present */
Sven Eckelmann08c36d32012-05-12 02:09:39 +0200680 batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr,
681 "global tt received", roaming);
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200682 ret = 1;
683out:
684 if (tt_global_entry)
685 tt_global_entry_free_ref(tt_global_entry);
686 return ret;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000687}
688
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200689/* print all orig nodes that announce the address for this global entry.
690 * it is assumed that the caller holds rcu_read_lock();
691 */
692static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
693 struct seq_file *seq)
694{
695 struct hlist_head *head;
696 struct hlist_node *node;
697 struct tt_orig_list_entry *orig_entry;
698 struct tt_common_entry *tt_common_entry;
699 uint16_t flags;
700 uint8_t last_ttvn;
701
702 tt_common_entry = &tt_global_entry->common;
703
704 head = &tt_global_entry->orig_list;
705
706 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
707 flags = tt_common_entry->flags;
708 last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
709 seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n",
710 tt_global_entry->common.addr, orig_entry->ttvn,
711 orig_entry->orig_node->orig, last_ttvn,
712 (flags & TT_CLIENT_ROAM ? 'R' : '.'),
713 (flags & TT_CLIENT_WIFI ? 'W' : '.'));
714 }
715}
716
Sven Eckelmann08c36d32012-05-12 02:09:39 +0200717int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000718{
719 struct net_device *net_dev = (struct net_device *)seq->private;
720 struct bat_priv *bat_priv = netdev_priv(net_dev);
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200721 struct hashtable_t *hash = bat_priv->tt_global_hash;
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100722 struct tt_common_entry *tt_common_entry;
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200723 struct tt_global_entry *tt_global_entry;
Marek Lindner32ae9b22011-04-20 15:40:58 +0200724 struct hard_iface *primary_if;
Marek Lindner7aadf882011-02-18 12:28:09 +0000725 struct hlist_node *node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000726 struct hlist_head *head;
Antonio Quartullic90681b2011-10-05 17:05:25 +0200727 uint32_t i;
728 int ret = 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000729
Marek Lindner32ae9b22011-04-20 15:40:58 +0200730 primary_if = primary_if_get_selected(bat_priv);
731 if (!primary_if) {
Sven Eckelmann86ceb362012-03-07 09:07:45 +0100732 ret = seq_printf(seq,
733 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
Marek Lindner32ae9b22011-04-20 15:40:58 +0200734 net_dev->name);
735 goto out;
736 }
737
738 if (primary_if->if_status != IF_ACTIVE) {
Sven Eckelmann86ceb362012-03-07 09:07:45 +0100739 ret = seq_printf(seq,
740 "BATMAN mesh %s disabled - primary interface not active\n",
Marek Lindner32ae9b22011-04-20 15:40:58 +0200741 net_dev->name);
742 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000743 }
744
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200745 seq_printf(seq,
746 "Globally announced TT entries received via the mesh %s\n",
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000747 net_dev->name);
Antonio Quartullidf6edb92011-07-07 15:35:38 +0200748 seq_printf(seq, " %-13s %s %-15s %s %s\n",
749 "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000750
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000751 for (i = 0; i < hash->size; i++) {
752 head = &hash->table[i];
753
Marek Lindner7aadf882011-02-18 12:28:09 +0000754 rcu_read_lock();
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100755 hlist_for_each_entry_rcu(tt_common_entry, node,
Marek Lindner7aadf882011-02-18 12:28:09 +0000756 head, hash_entry) {
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100757 tt_global_entry = container_of(tt_common_entry,
758 struct tt_global_entry,
759 common);
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200760 tt_global_print_entry(tt_global_entry, seq);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000761 }
Marek Lindner7aadf882011-02-18 12:28:09 +0000762 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000763 }
Marek Lindner32ae9b22011-04-20 15:40:58 +0200764out:
765 if (primary_if)
766 hardif_free_ref(primary_if);
767 return ret;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000768}
769
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200770/* deletes the orig list of a tt_global_entry */
771static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000772{
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200773 struct hlist_head *head;
774 struct hlist_node *node, *safe;
775 struct tt_orig_list_entry *orig_entry;
Antonio Quartullia73105b2011-04-27 14:27:44 +0200776
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200777 spin_lock_bh(&tt_global_entry->list_lock);
778 head = &tt_global_entry->orig_list;
779 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
780 hlist_del_rcu(node);
781 tt_orig_list_entry_free_ref(orig_entry);
782 }
783 spin_unlock_bh(&tt_global_entry->list_lock);
784
785}
786
787static void tt_global_del_orig_entry(struct bat_priv *bat_priv,
788 struct tt_global_entry *tt_global_entry,
789 struct orig_node *orig_node,
790 const char *message)
791{
792 struct hlist_head *head;
793 struct hlist_node *node, *safe;
794 struct tt_orig_list_entry *orig_entry;
795
796 spin_lock_bh(&tt_global_entry->list_lock);
797 head = &tt_global_entry->orig_list;
798 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
799 if (orig_entry->orig_node == orig_node) {
800 bat_dbg(DBG_TT, bat_priv,
801 "Deleting %pM from global tt entry %pM: %s\n",
802 orig_node->orig, tt_global_entry->common.addr,
803 message);
804 hlist_del_rcu(node);
805 tt_orig_list_entry_free_ref(orig_entry);
806 }
807 }
808 spin_unlock_bh(&tt_global_entry->list_lock);
809}
810
811static void tt_global_del_struct(struct bat_priv *bat_priv,
812 struct tt_global_entry *tt_global_entry,
813 const char *message)
814{
Antonio Quartullia73105b2011-04-27 14:27:44 +0200815 bat_dbg(DBG_TT, bat_priv,
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200816 "Deleting global tt entry %pM: %s\n",
817 tt_global_entry->common.addr, message);
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200818
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100819 hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
820 tt_global_entry->common.addr);
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200821 tt_global_entry_free_ref(tt_global_entry);
822
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000823}
824
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200825/* If the client is to be deleted, we check if it is the last originator entry
 826 * within the tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and the timer,
827 * otherwise we simply remove the originator scheduled for deletion.
828 */
829static void tt_global_del_roaming(struct bat_priv *bat_priv,
830 struct tt_global_entry *tt_global_entry,
831 struct orig_node *orig_node,
832 const char *message)
833{
834 bool last_entry = true;
835 struct hlist_head *head;
836 struct hlist_node *node;
837 struct tt_orig_list_entry *orig_entry;
838
839 /* no local entry exists, case 1:
840 * Check if this is the last one or if other entries exist.
841 */
842
843 rcu_read_lock();
844 head = &tt_global_entry->orig_list;
845 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
846 if (orig_entry->orig_node != orig_node) {
847 last_entry = false;
848 break;
849 }
850 }
851 rcu_read_unlock();
852
853 if (last_entry) {
 854 /* it's the last one, mark for roaming. */
855 tt_global_entry->common.flags |= TT_CLIENT_ROAM;
856 tt_global_entry->roam_at = jiffies;
857 } else
858 /* there is another entry, we can simply delete this
859 * one and can still use the other one.
860 */
861 tt_global_del_orig_entry(bat_priv, tt_global_entry,
862 orig_node, message);
863}
864
865
866
Sven Eckelmannde7aae62012-02-05 18:55:22 +0100867static void tt_global_del(struct bat_priv *bat_priv,
868 struct orig_node *orig_node,
869 const unsigned char *addr,
870 const char *message, bool roaming)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000871{
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200872 struct tt_global_entry *tt_global_entry = NULL;
Antonio Quartulli797399b2011-12-04 22:38:27 +0100873 struct tt_local_entry *tt_local_entry = NULL;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000874
Antonio Quartullia73105b2011-04-27 14:27:44 +0200875 tt_global_entry = tt_global_hash_find(bat_priv, addr);
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200876 if (!tt_global_entry)
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200877 goto out;
Antonio Quartullia73105b2011-04-27 14:27:44 +0200878
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200879 if (!roaming) {
880 tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node,
881 message);
Sven Eckelmann92f90f52011-12-22 20:31:12 +0800882
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200883 if (hlist_empty(&tt_global_entry->orig_list))
884 tt_global_del_struct(bat_priv, tt_global_entry,
885 message);
886
Sven Eckelmann92f90f52011-12-22 20:31:12 +0800887 goto out;
Antonio Quartullia73105b2011-04-27 14:27:44 +0200888 }
Sven Eckelmann92f90f52011-12-22 20:31:12 +0800889
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200890 /* if we are deleting a global entry due to a roam
891 * event, there are two possibilities:
892 * 1) the client roamed from node A to node B => if there
893 * is only one originator left for this client, we mark
894 * it with TT_CLIENT_ROAM, we start a timer and we
895 * wait for node B to claim it. In case of timeout
896 * the entry is purged.
897 *
898 * If there are other originators left, we directly delete
899 * the originator.
900 * 2) the client roamed to us => we can directly delete
901 * the global entry, since it is useless now. */
902
903 tt_local_entry = tt_local_hash_find(bat_priv,
904 tt_global_entry->common.addr);
905 if (tt_local_entry) {
906 /* local entry exists, case 2: client roamed to us. */
907 tt_global_del_orig_list(tt_global_entry);
908 tt_global_del_struct(bat_priv, tt_global_entry, message);
909 } else
910 /* no local entry exists, case 1: check for roaming */
911 tt_global_del_roaming(bat_priv, tt_global_entry, orig_node,
912 message);
913
Sven Eckelmann92f90f52011-12-22 20:31:12 +0800914
Antonio Quartullicc47f662011-04-27 14:27:57 +0200915out:
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200916 if (tt_global_entry)
917 tt_global_entry_free_ref(tt_global_entry);
Antonio Quartulli797399b2011-12-04 22:38:27 +0100918 if (tt_local_entry)
919 tt_local_entry_free_ref(tt_local_entry);
Antonio Quartullia73105b2011-04-27 14:27:44 +0200920}
921
Sven Eckelmann08c36d32012-05-12 02:09:39 +0200922void batadv_tt_global_del_orig(struct bat_priv *bat_priv,
923 struct orig_node *orig_node, const char *message)
Antonio Quartullia73105b2011-04-27 14:27:44 +0200924{
925 struct tt_global_entry *tt_global_entry;
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100926 struct tt_common_entry *tt_common_entry;
Antonio Quartullic90681b2011-10-05 17:05:25 +0200927 uint32_t i;
Antonio Quartullia73105b2011-04-27 14:27:44 +0200928 struct hashtable_t *hash = bat_priv->tt_global_hash;
929 struct hlist_node *node, *safe;
930 struct hlist_head *head;
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200931 spinlock_t *list_lock; /* protects write access to the hash lists */
Antonio Quartullia73105b2011-04-27 14:27:44 +0200932
Simon Wunderlich6e801492011-10-19 10:28:26 +0200933 if (!hash)
934 return;
935
Antonio Quartullia73105b2011-04-27 14:27:44 +0200936 for (i = 0; i < hash->size; i++) {
937 head = &hash->table[i];
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200938 list_lock = &hash->list_locks[i];
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000939
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200940 spin_lock_bh(list_lock);
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100941 hlist_for_each_entry_safe(tt_common_entry, node, safe,
Sven Eckelmann7c64fd92012-02-28 10:55:36 +0100942 head, hash_entry) {
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100943 tt_global_entry = container_of(tt_common_entry,
944 struct tt_global_entry,
945 common);
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200946
947 tt_global_del_orig_entry(bat_priv, tt_global_entry,
948 orig_node, message);
949
950 if (hlist_empty(&tt_global_entry->orig_list)) {
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200951 bat_dbg(DBG_TT, bat_priv,
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200952 "Deleting global tt entry %pM: %s\n",
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100953 tt_global_entry->common.addr,
Antonio Quartulli87944972011-09-19 12:29:19 +0200954 message);
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200955 hlist_del_rcu(node);
956 tt_global_entry_free_ref(tt_global_entry);
957 }
Antonio Quartullia73105b2011-04-27 14:27:44 +0200958 }
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200959 spin_unlock_bh(list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000960 }
Antonio Quartullia73105b2011-04-27 14:27:44 +0200961 atomic_set(&orig_node->tt_size, 0);
Antonio Quartulli17071572011-11-07 16:36:40 +0100962 orig_node->tt_initialised = false;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000963}
964
Antonio Quartullicc47f662011-04-27 14:27:57 +0200965static void tt_global_roam_purge(struct bat_priv *bat_priv)
966{
967 struct hashtable_t *hash = bat_priv->tt_global_hash;
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100968 struct tt_common_entry *tt_common_entry;
Antonio Quartullicc47f662011-04-27 14:27:57 +0200969 struct tt_global_entry *tt_global_entry;
970 struct hlist_node *node, *node_tmp;
971 struct hlist_head *head;
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200972 spinlock_t *list_lock; /* protects write access to the hash lists */
Antonio Quartullic90681b2011-10-05 17:05:25 +0200973 uint32_t i;
Antonio Quartullicc47f662011-04-27 14:27:57 +0200974
Antonio Quartullicc47f662011-04-27 14:27:57 +0200975 for (i = 0; i < hash->size; i++) {
976 head = &hash->table[i];
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200977 list_lock = &hash->list_locks[i];
Antonio Quartullicc47f662011-04-27 14:27:57 +0200978
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200979 spin_lock_bh(list_lock);
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100980 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
Antonio Quartullicc47f662011-04-27 14:27:57 +0200981 head, hash_entry) {
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100982 tt_global_entry = container_of(tt_common_entry,
983 struct tt_global_entry,
984 common);
985 if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
Antonio Quartullicc47f662011-04-27 14:27:57 +0200986 continue;
Martin Hundebølla04ccd52011-12-08 13:32:41 +0100987 if (!has_timed_out(tt_global_entry->roam_at,
Marek Lindner032b7962011-12-20 19:30:40 +0800988 TT_CLIENT_ROAM_TIMEOUT))
Antonio Quartullicc47f662011-04-27 14:27:57 +0200989 continue;
990
Sven Eckelmann86ceb362012-03-07 09:07:45 +0100991 bat_dbg(DBG_TT, bat_priv,
992 "Deleting global tt entry (%pM): Roaming timeout\n",
Antonio Quartulli48100ba2011-10-30 12:17:33 +0100993 tt_global_entry->common.addr);
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +0200994
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200995 hlist_del_rcu(node);
996 tt_global_entry_free_ref(tt_global_entry);
Antonio Quartullicc47f662011-04-27 14:27:57 +0200997 }
Antonio Quartulli7683fdc2011-04-27 14:28:07 +0200998 spin_unlock_bh(list_lock);
Antonio Quartullicc47f662011-04-27 14:27:57 +0200999 }
1000
Antonio Quartullicc47f662011-04-27 14:27:57 +02001001}
1002
Antonio Quartullia73105b2011-04-27 14:27:44 +02001003static void tt_global_table_free(struct bat_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001004{
Antonio Quartulli7683fdc2011-04-27 14:28:07 +02001005 struct hashtable_t *hash;
1006 spinlock_t *list_lock; /* protects write access to the hash lists */
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001007 struct tt_common_entry *tt_common_entry;
Antonio Quartulli7683fdc2011-04-27 14:28:07 +02001008 struct tt_global_entry *tt_global_entry;
1009 struct hlist_node *node, *node_tmp;
1010 struct hlist_head *head;
Antonio Quartullic90681b2011-10-05 17:05:25 +02001011 uint32_t i;
Antonio Quartulli7683fdc2011-04-27 14:28:07 +02001012
Antonio Quartulli2dafb492011-05-05 08:42:45 +02001013 if (!bat_priv->tt_global_hash)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001014 return;
1015
Antonio Quartulli7683fdc2011-04-27 14:28:07 +02001016 hash = bat_priv->tt_global_hash;
1017
1018 for (i = 0; i < hash->size; i++) {
1019 head = &hash->table[i];
1020 list_lock = &hash->list_locks[i];
1021
1022 spin_lock_bh(list_lock);
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001023 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
Antonio Quartulli7683fdc2011-04-27 14:28:07 +02001024 head, hash_entry) {
1025 hlist_del_rcu(node);
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001026 tt_global_entry = container_of(tt_common_entry,
1027 struct tt_global_entry,
1028 common);
Antonio Quartulli7683fdc2011-04-27 14:28:07 +02001029 tt_global_entry_free_ref(tt_global_entry);
1030 }
1031 spin_unlock_bh(list_lock);
1032 }
1033
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +02001034 batadv_hash_destroy(hash);
Antonio Quartulli7683fdc2011-04-27 14:28:07 +02001035
Antonio Quartulli2dafb492011-05-05 08:42:45 +02001036 bat_priv->tt_global_hash = NULL;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001037}
1038
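/* two clients are not allowed to talk to each other if both of them are
 * marked as WIFI clients (used by the caller to enforce AP isolation) */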
Antonio Quartulli59b699c2011-07-07 15:35:36 +02001039static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
1040 struct tt_global_entry *tt_global_entry)
1041{
1042 bool ret = false;
1043
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001044 if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
1045 tt_global_entry->common.flags & TT_CLIENT_WIFI)
Antonio Quartulli59b699c2011-07-07 15:35:36 +02001046 ret = true;
1047
1048 return ret;
1049}
1050
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001051struct orig_node *batadv_transtable_search(struct bat_priv *bat_priv,
1052 const uint8_t *src,
1053 const uint8_t *addr)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001054{
Antonio Quartulli3d393e42011-07-07 15:35:37 +02001055 struct tt_local_entry *tt_local_entry = NULL;
1056 struct tt_global_entry *tt_global_entry = NULL;
Marek Lindner7b36e8e2011-02-18 12:28:10 +00001057 struct orig_node *orig_node = NULL;
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +02001058 struct neigh_node *router = NULL;
1059 struct hlist_head *head;
1060 struct hlist_node *node;
1061 struct tt_orig_list_entry *orig_entry;
1062 int best_tq;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001063
Antonio Quartulli3d393e42011-07-07 15:35:37 +02001064 if (src && atomic_read(&bat_priv->ap_isolation)) {
1065 tt_local_entry = tt_local_hash_find(bat_priv, src);
1066 if (!tt_local_entry)
1067 goto out;
1068 }
Marek Lindner7aadf882011-02-18 12:28:09 +00001069
Antonio Quartulli3d393e42011-07-07 15:35:37 +02001070 tt_global_entry = tt_global_hash_find(bat_priv, addr);
Antonio Quartulli2dafb492011-05-05 08:42:45 +02001071 if (!tt_global_entry)
Marek Lindner7b36e8e2011-02-18 12:28:10 +00001072 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001073
Antonio Quartulli3d393e42011-07-07 15:35:37 +02001074 /* check whether the clients should not communicate due to AP
1075 * isolation */
1076 if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
1077 goto out;
1078
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +02001079 best_tq = 0;
Marek Lindner7b36e8e2011-02-18 12:28:10 +00001080
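	/* among all originators announcing this client, pick the one reachable
	 * via the neighbor with the best TQ value */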
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +02001081 rcu_read_lock();
1082 head = &tt_global_entry->orig_list;
1083 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001084 router = batadv_orig_node_get_router(orig_entry->orig_node);
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +02001085 if (!router)
1086 continue;
Marek Lindner7b36e8e2011-02-18 12:28:10 +00001087
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +02001088 if (router->tq_avg > best_tq) {
1089 orig_node = orig_entry->orig_node;
1090 best_tq = router->tq_avg;
1091 }
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001092 batadv_neigh_node_free_ref(router);
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +02001093 }
1094 /* found anything? */
1095 if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
1096 orig_node = NULL;
1097 rcu_read_unlock();
Marek Lindner7b36e8e2011-02-18 12:28:10 +00001098out:
Antonio Quartulli3d393e42011-07-07 15:35:37 +02001099 if (tt_global_entry)
1100 tt_global_entry_free_ref(tt_global_entry);
1101 if (tt_local_entry)
1102 tt_local_entry_free_ref(tt_local_entry);
1103
Marek Lindner7b36e8e2011-02-18 12:28:10 +00001104 return orig_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001105}
Antonio Quartullia73105b2011-04-27 14:27:44 +02001106
 1107/* Calculates the checksum of the global table for a given orig_node */
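/* Each entry contributes the CRC16 of its mac address and the per-entry
 * values are XORed together, so the result does not depend on the order in
 * which the hash buckets are walked */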
Sven Eckelmannde7aae62012-02-05 18:55:22 +01001108static uint16_t tt_global_crc(struct bat_priv *bat_priv,
1109 struct orig_node *orig_node)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001110{
1111 uint16_t total = 0, total_one;
1112 struct hashtable_t *hash = bat_priv->tt_global_hash;
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001113 struct tt_common_entry *tt_common_entry;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001114 struct tt_global_entry *tt_global_entry;
1115 struct hlist_node *node;
1116 struct hlist_head *head;
Antonio Quartullic90681b2011-10-05 17:05:25 +02001117 uint32_t i;
1118 int j;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001119
1120 for (i = 0; i < hash->size; i++) {
1121 head = &hash->table[i];
1122
1123 rcu_read_lock();
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001124 hlist_for_each_entry_rcu(tt_common_entry, node,
Antonio Quartullia73105b2011-04-27 14:27:44 +02001125 head, hash_entry) {
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001126 tt_global_entry = container_of(tt_common_entry,
1127 struct tt_global_entry,
1128 common);
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +02001129 /* Roaming clients are in the global table for
 1130 * consistency only. They must not be
1131 * taken into account while computing the
1132 * global crc
1133 */
1134 if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
1135 continue;
1136
1137 /* find out if this global entry is announced by this
1138 * originator
1139 */
1140 if (!tt_global_entry_has_orig(tt_global_entry,
1141 orig_node))
1142 continue;
1143
1144 total_one = 0;
1145 for (j = 0; j < ETH_ALEN; j++)
1146 total_one = crc16_byte(total_one,
1147 tt_global_entry->common.addr[j]);
1148 total ^= total_one;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001149 }
1150 rcu_read_unlock();
1151 }
1152
1153 return total;
1154}
1155
1156/* Calculates the checksum of the local table */
Marek Lindnerbe9aa4c2012-05-07 04:22:05 +08001157static uint16_t batadv_tt_local_crc(struct bat_priv *bat_priv)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001158{
1159 uint16_t total = 0, total_one;
1160 struct hashtable_t *hash = bat_priv->tt_local_hash;
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001161 struct tt_common_entry *tt_common_entry;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001162 struct hlist_node *node;
1163 struct hlist_head *head;
Antonio Quartullic90681b2011-10-05 17:05:25 +02001164 uint32_t i;
1165 int j;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001166
1167 for (i = 0; i < hash->size; i++) {
1168 head = &hash->table[i];
1169
1170 rcu_read_lock();
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001171 hlist_for_each_entry_rcu(tt_common_entry, node,
Antonio Quartullia73105b2011-04-27 14:27:44 +02001172 head, hash_entry) {
Antonio Quartulli058d0e22011-07-07 01:40:58 +02001173			/* clients that have not yet been committed must not be
1174			 * taken into account while computing the CRC */
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001175 if (tt_common_entry->flags & TT_CLIENT_NEW)
Antonio Quartulli058d0e22011-07-07 01:40:58 +02001176 continue;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001177 total_one = 0;
1178 for (j = 0; j < ETH_ALEN; j++)
1179 total_one = crc16_byte(total_one,
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001180 tt_common_entry->addr[j]);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001181 total ^= total_one;
1182 }
Antonio Quartullia73105b2011-04-27 14:27:44 +02001183 rcu_read_unlock();
1184 }
1185
1186 return total;
1187}
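
/* A minimal user-space sketch of the checksum scheme shared by
 * tt_global_crc() and batadv_tt_local_crc() above: every client MAC is
 * reduced to a CRC16 and the per-client values are XORed together, so the
 * result does not depend on the order in which the hash buckets are
 * walked. example_crc16_byte() is a plain bitwise CRC-16 (reflected
 * polynomial 0xA001) standing in for the kernel's crc16_byte(); all names
 * here are assumptions made only for this illustration.
 */
#include <stdint.h>
#include <stddef.h>

static uint16_t example_crc16_byte(uint16_t crc, uint8_t data)
{
	int i;

	crc ^= data;
	for (i = 0; i < 8; i++)
		crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;

	return crc;
}

/* XOR of the CRC16 of every 6-byte address: order independent by design */
static uint16_t example_table_crc(const uint8_t macs[][6], size_t num)
{
	uint16_t total = 0, total_one;
	size_t i;
	int j;

	for (i = 0; i < num; i++) {
		total_one = 0;
		for (j = 0; j < 6; j++)
			total_one = example_crc16_byte(total_one, macs[i][j]);
		total ^= total_one;
	}

	return total;
}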
1188
1189static void tt_req_list_free(struct bat_priv *bat_priv)
1190{
1191 struct tt_req_node *node, *safe;
1192
1193 spin_lock_bh(&bat_priv->tt_req_list_lock);
1194
1195 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1196 list_del(&node->list);
1197 kfree(node);
1198 }
1199
1200 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1201}
1202
Sven Eckelmannde7aae62012-02-05 18:55:22 +01001203static void tt_save_orig_buffer(struct bat_priv *bat_priv,
1204 struct orig_node *orig_node,
1205 const unsigned char *tt_buff,
1206 uint8_t tt_num_changes)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001207{
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001208 uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001209
1210 /* Replace the old buffer only if I received something in the
1211 * last OGM (the OGM could carry no changes) */
1212 spin_lock_bh(&orig_node->tt_buff_lock);
1213 if (tt_buff_len > 0) {
1214 kfree(orig_node->tt_buff);
1215 orig_node->tt_buff_len = 0;
1216 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
1217 if (orig_node->tt_buff) {
1218 memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
1219 orig_node->tt_buff_len = tt_buff_len;
1220 }
1221 }
1222 spin_unlock_bh(&orig_node->tt_buff_lock);
1223}
1224
1225static void tt_req_purge(struct bat_priv *bat_priv)
1226{
1227 struct tt_req_node *node, *safe;
1228
1229 spin_lock_bh(&bat_priv->tt_req_list_lock);
1230 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
Marek Lindner032b7962011-12-20 19:30:40 +08001231 if (has_timed_out(node->issued_at, TT_REQUEST_TIMEOUT)) {
Antonio Quartullia73105b2011-04-27 14:27:44 +02001232 list_del(&node->list);
1233 kfree(node);
1234 }
1235 }
1236 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1237}
1238
1239/* returns the pointer to the new tt_req_node struct if no request
1240 * has already been issued for this orig_node, NULL otherwise */
1241static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
1242 struct orig_node *orig_node)
1243{
1244 struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
1245
1246 spin_lock_bh(&bat_priv->tt_req_list_lock);
1247 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
1248 if (compare_eth(tt_req_node_tmp, orig_node) &&
Martin Hundebølla04ccd52011-12-08 13:32:41 +01001249 !has_timed_out(tt_req_node_tmp->issued_at,
Marek Lindner032b7962011-12-20 19:30:40 +08001250 TT_REQUEST_TIMEOUT))
Antonio Quartullia73105b2011-04-27 14:27:44 +02001251 goto unlock;
1252 }
1253
1254 tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
1255 if (!tt_req_node)
1256 goto unlock;
1257
1258 memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
1259 tt_req_node->issued_at = jiffies;
1260
1261 list_add(&tt_req_node->list, &bat_priv->tt_req_list);
1262unlock:
1263 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1264 return tt_req_node;
1265}
1266
Antonio Quartulli058d0e22011-07-07 01:40:58 +02001267/* data_ptr is useless here, but has to be kept to respect the prototype */
1268static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
1269{
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001270 const struct tt_common_entry *tt_common_entry = entry_ptr;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02001271
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001272 if (tt_common_entry->flags & TT_CLIENT_NEW)
Antonio Quartulli058d0e22011-07-07 01:40:58 +02001273 return 0;
1274 return 1;
1275}
1276
Antonio Quartullia73105b2011-04-27 14:27:44 +02001277static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
1278{
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001279 const struct tt_common_entry *tt_common_entry = entry_ptr;
1280 const struct tt_global_entry *tt_global_entry;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001281 const struct orig_node *orig_node = data_ptr;
1282
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001283 if (tt_common_entry->flags & TT_CLIENT_ROAM)
Antonio Quartullicc47f662011-04-27 14:27:57 +02001284 return 0;
1285
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001286 tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
1287 common);
1288
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +02001289 return tt_global_entry_has_orig(tt_global_entry, orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001290}
1291
1292static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1293 struct hashtable_t *hash,
1294 struct hard_iface *primary_if,
1295 int (*valid_cb)(const void *,
1296 const void *),
1297 void *cb_data)
1298{
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001299 struct tt_common_entry *tt_common_entry;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001300 struct tt_query_packet *tt_response;
1301 struct tt_change *tt_change;
1302 struct hlist_node *node;
1303 struct hlist_head *head;
1304 struct sk_buff *skb = NULL;
1305 uint16_t tt_tot, tt_count;
1306 ssize_t tt_query_size = sizeof(struct tt_query_packet);
Antonio Quartullic90681b2011-10-05 17:05:25 +02001307 uint32_t i;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001308
1309 if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
1310 tt_len = primary_if->soft_iface->mtu - tt_query_size;
1311 tt_len -= tt_len % sizeof(struct tt_change);
1312 }
1313 tt_tot = tt_len / sizeof(struct tt_change);
1314
1315 skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
1316 if (!skb)
1317 goto out;
1318
1319 skb_reserve(skb, ETH_HLEN);
1320 tt_response = (struct tt_query_packet *)skb_put(skb,
1321 tt_query_size + tt_len);
1322 tt_response->ttvn = ttvn;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001323
1324 tt_change = (struct tt_change *)(skb->data + tt_query_size);
1325 tt_count = 0;
1326
1327 rcu_read_lock();
1328 for (i = 0; i < hash->size; i++) {
1329 head = &hash->table[i];
1330
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001331 hlist_for_each_entry_rcu(tt_common_entry, node,
Antonio Quartullia73105b2011-04-27 14:27:44 +02001332 head, hash_entry) {
1333 if (tt_count == tt_tot)
1334 break;
1335
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001336 if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
Antonio Quartullia73105b2011-04-27 14:27:44 +02001337 continue;
1338
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001339 memcpy(tt_change->addr, tt_common_entry->addr,
1340 ETH_ALEN);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001341 tt_change->flags = NO_FLAGS;
1342
1343 tt_count++;
1344 tt_change++;
1345 }
1346 }
1347 rcu_read_unlock();
1348
Antonio Quartulli9d852392011-10-17 14:25:13 +02001349 /* store in the message the number of entries we have successfully
1350 * copied */
1351 tt_response->tt_data = htons(tt_count);
1352
Antonio Quartullia73105b2011-04-27 14:27:44 +02001353out:
1354 return skb;
1355}
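
/* A small sketch of the sizing rule used by tt_response_fill_table()
 * above: clamp the requested TT payload to what still fits into a single
 * frame next to the query header, then round it down to a whole number of
 * tt_change records. The function and parameter names are assumptions
 * made only for this illustration.
 */
#include <stddef.h>

static size_t example_tt_entries_that_fit(size_t mtu, size_t header_len,
					  size_t entry_len, size_t wanted_len)
{
	if (mtu <= header_len)
		return 0;	/* no room for even a single entry */

	if (header_len + wanted_len > mtu) {
		wanted_len = mtu - header_len;
		wanted_len -= wanted_len % entry_len;
	}

	return wanted_len / entry_len;	/* number of tt_change entries */
}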
1356
Marek Lindnera943cac2011-07-30 13:10:18 +02001357static int send_tt_request(struct bat_priv *bat_priv,
1358 struct orig_node *dst_orig_node,
1359 uint8_t ttvn, uint16_t tt_crc, bool full_table)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001360{
1361 struct sk_buff *skb = NULL;
1362 struct tt_query_packet *tt_request;
1363 struct neigh_node *neigh_node = NULL;
1364 struct hard_iface *primary_if;
1365 struct tt_req_node *tt_req_node = NULL;
1366 int ret = 1;
1367
1368 primary_if = primary_if_get_selected(bat_priv);
1369 if (!primary_if)
1370 goto out;
1371
1372 /* The new tt_req will be issued only if I'm not waiting for a
1373 * reply from the same orig_node yet */
1374 tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
1375 if (!tt_req_node)
1376 goto out;
1377
1378 skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
1379 if (!skb)
1380 goto out;
1381
1382 skb_reserve(skb, ETH_HLEN);
1383
1384 tt_request = (struct tt_query_packet *)skb_put(skb,
1385 sizeof(struct tt_query_packet));
1386
Sven Eckelmann76543d12011-11-20 15:47:38 +01001387 tt_request->header.packet_type = BAT_TT_QUERY;
1388 tt_request->header.version = COMPAT_VERSION;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001389 memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1390 memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
Sven Eckelmann76543d12011-11-20 15:47:38 +01001391 tt_request->header.ttl = TTL;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001392 tt_request->ttvn = ttvn;
Antonio Quartulli6d2003f2012-04-14 13:15:27 +02001393 tt_request->tt_data = htons(tt_crc);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001394 tt_request->flags = TT_REQUEST;
1395
1396 if (full_table)
1397 tt_request->flags |= TT_FULL_TABLE;
1398
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001399 neigh_node = batadv_orig_node_get_router(dst_orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001400 if (!neigh_node)
1401 goto out;
1402
Sven Eckelmann86ceb362012-03-07 09:07:45 +01001403 bat_dbg(DBG_TT, bat_priv,
1404 "Sending TT_REQUEST to %pM via %pM [%c]\n",
1405 dst_orig_node->orig, neigh_node->addr,
Antonio Quartullia73105b2011-04-27 14:27:44 +02001406 (full_table ? 'F' : '.'));
1407
Martin Hundebøllf8214862012-04-20 17:02:45 +02001408 batadv_inc_counter(bat_priv, BAT_CNT_TT_REQUEST_TX);
1409
Sven Eckelmann9455e342012-05-12 02:09:37 +02001410 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001411 ret = 0;
1412
1413out:
1414 if (neigh_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001415 batadv_neigh_node_free_ref(neigh_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001416 if (primary_if)
1417 hardif_free_ref(primary_if);
1418 if (ret)
1419 kfree_skb(skb);
1420 if (ret && tt_req_node) {
1421 spin_lock_bh(&bat_priv->tt_req_list_lock);
1422 list_del(&tt_req_node->list);
1423 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1424 kfree(tt_req_node);
1425 }
1426 return ret;
1427}
1428
1429static bool send_other_tt_response(struct bat_priv *bat_priv,
1430 struct tt_query_packet *tt_request)
1431{
1432 struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
1433 struct neigh_node *neigh_node = NULL;
1434 struct hard_iface *primary_if = NULL;
1435 uint8_t orig_ttvn, req_ttvn, ttvn;
1436 int ret = false;
1437 unsigned char *tt_buff;
1438 bool full_table;
1439 uint16_t tt_len, tt_tot;
1440 struct sk_buff *skb = NULL;
1441 struct tt_query_packet *tt_response;
1442
1443 bat_dbg(DBG_TT, bat_priv,
Sven Eckelmann86ceb362012-03-07 09:07:45 +01001444 "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
1445 tt_request->src, tt_request->ttvn, tt_request->dst,
Antonio Quartullia73105b2011-04-27 14:27:44 +02001446 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
1447
1448 /* Let's get the orig node of the REAL destination */
Antonio Quartullieb7e2a12011-10-12 14:54:50 +02001449 req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001450 if (!req_dst_orig_node)
1451 goto out;
1452
Antonio Quartullieb7e2a12011-10-12 14:54:50 +02001453 res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001454 if (!res_dst_orig_node)
1455 goto out;
1456
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001457 neigh_node = batadv_orig_node_get_router(res_dst_orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001458 if (!neigh_node)
1459 goto out;
1460
1461 primary_if = primary_if_get_selected(bat_priv);
1462 if (!primary_if)
1463 goto out;
1464
1465 orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1466 req_ttvn = tt_request->ttvn;
1467
Antonio Quartulli015758d2011-07-09 17:52:13 +02001468 /* I don't have the requested data */
Antonio Quartullia73105b2011-04-27 14:27:44 +02001469 if (orig_ttvn != req_ttvn ||
Al Virof25bd582012-04-22 07:44:27 +01001470 tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
Antonio Quartullia73105b2011-04-27 14:27:44 +02001471 goto out;
1472
Antonio Quartulli015758d2011-07-09 17:52:13 +02001473 /* If the full table has been explicitly requested */
Antonio Quartullia73105b2011-04-27 14:27:44 +02001474 if (tt_request->flags & TT_FULL_TABLE ||
1475 !req_dst_orig_node->tt_buff)
1476 full_table = true;
1477 else
1478 full_table = false;
1479
1480	/* In this version, fragmentation is not implemented, so
1481	 * I'll send only one packet with as many TT entries as I can */
1482 if (!full_table) {
1483 spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
1484 tt_len = req_dst_orig_node->tt_buff_len;
1485 tt_tot = tt_len / sizeof(struct tt_change);
1486
1487 skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
1488 tt_len + ETH_HLEN);
1489 if (!skb)
1490 goto unlock;
1491
1492 skb_reserve(skb, ETH_HLEN);
1493 tt_response = (struct tt_query_packet *)skb_put(skb,
1494 sizeof(struct tt_query_packet) + tt_len);
1495 tt_response->ttvn = req_ttvn;
1496 tt_response->tt_data = htons(tt_tot);
1497
1498 tt_buff = skb->data + sizeof(struct tt_query_packet);
1499 /* Copy the last orig_node's OGM buffer */
1500 memcpy(tt_buff, req_dst_orig_node->tt_buff,
1501 req_dst_orig_node->tt_buff_len);
1502
1503 spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1504 } else {
1505 tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
1506 sizeof(struct tt_change);
1507 ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1508
1509 skb = tt_response_fill_table(tt_len, ttvn,
1510 bat_priv->tt_global_hash,
1511 primary_if, tt_global_valid_entry,
1512 req_dst_orig_node);
1513 if (!skb)
1514 goto out;
1515
1516 tt_response = (struct tt_query_packet *)skb->data;
1517 }
1518
Sven Eckelmann76543d12011-11-20 15:47:38 +01001519 tt_response->header.packet_type = BAT_TT_QUERY;
1520 tt_response->header.version = COMPAT_VERSION;
1521 tt_response->header.ttl = TTL;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001522 memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
1523 memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1524 tt_response->flags = TT_RESPONSE;
1525
1526 if (full_table)
1527 tt_response->flags |= TT_FULL_TABLE;
1528
1529 bat_dbg(DBG_TT, bat_priv,
1530 "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
1531 res_dst_orig_node->orig, neigh_node->addr,
1532 req_dst_orig_node->orig, req_ttvn);
1533
Martin Hundebøllf8214862012-04-20 17:02:45 +02001534 batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX);
1535
Sven Eckelmann9455e342012-05-12 02:09:37 +02001536 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001537 ret = true;
1538 goto out;
1539
1540unlock:
1541 spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1542
1543out:
1544 if (res_dst_orig_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001545 batadv_orig_node_free_ref(res_dst_orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001546 if (req_dst_orig_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001547 batadv_orig_node_free_ref(req_dst_orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001548 if (neigh_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001549 batadv_neigh_node_free_ref(neigh_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001550 if (primary_if)
1551 hardif_free_ref(primary_if);
1552 if (!ret)
1553 kfree_skb(skb);
1554 return ret;
1555}
1556
1557static bool send_my_tt_response(struct bat_priv *bat_priv,
1558 struct tt_query_packet *tt_request)
1559{
1560 struct orig_node *orig_node = NULL;
1561 struct neigh_node *neigh_node = NULL;
1562 struct hard_iface *primary_if = NULL;
1563 uint8_t my_ttvn, req_ttvn, ttvn;
1564 int ret = false;
1565 unsigned char *tt_buff;
1566 bool full_table;
1567 uint16_t tt_len, tt_tot;
1568 struct sk_buff *skb = NULL;
1569 struct tt_query_packet *tt_response;
1570
1571 bat_dbg(DBG_TT, bat_priv,
Sven Eckelmann86ceb362012-03-07 09:07:45 +01001572 "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
1573 tt_request->src, tt_request->ttvn,
Antonio Quartullia73105b2011-04-27 14:27:44 +02001574 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
1575
1577 my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
1578 req_ttvn = tt_request->ttvn;
1579
Antonio Quartullieb7e2a12011-10-12 14:54:50 +02001580 orig_node = orig_hash_find(bat_priv, tt_request->src);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001581 if (!orig_node)
1582 goto out;
1583
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001584 neigh_node = batadv_orig_node_get_router(orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001585 if (!neigh_node)
1586 goto out;
1587
1588 primary_if = primary_if_get_selected(bat_priv);
1589 if (!primary_if)
1590 goto out;
1591
1592 /* If the full table has been explicitly requested or the gap
1593	 * is too big, send the whole local translation table */
1594 if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
1595 !bat_priv->tt_buff)
1596 full_table = true;
1597 else
1598 full_table = false;
1599
1600	/* In this version, fragmentation is not implemented, so
1601	 * I'll send only one packet with as many TT entries as I can */
1602 if (!full_table) {
1603 spin_lock_bh(&bat_priv->tt_buff_lock);
1604 tt_len = bat_priv->tt_buff_len;
1605 tt_tot = tt_len / sizeof(struct tt_change);
1606
1607 skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
1608 tt_len + ETH_HLEN);
1609 if (!skb)
1610 goto unlock;
1611
1612 skb_reserve(skb, ETH_HLEN);
1613 tt_response = (struct tt_query_packet *)skb_put(skb,
1614 sizeof(struct tt_query_packet) + tt_len);
1615 tt_response->ttvn = req_ttvn;
1616 tt_response->tt_data = htons(tt_tot);
1617
1618 tt_buff = skb->data + sizeof(struct tt_query_packet);
1619 memcpy(tt_buff, bat_priv->tt_buff,
1620 bat_priv->tt_buff_len);
1621 spin_unlock_bh(&bat_priv->tt_buff_lock);
1622 } else {
1623 tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
1624 sizeof(struct tt_change);
1625 ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
1626
1627 skb = tt_response_fill_table(tt_len, ttvn,
1628 bat_priv->tt_local_hash,
Antonio Quartulli058d0e22011-07-07 01:40:58 +02001629 primary_if, tt_local_valid_entry,
1630 NULL);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001631 if (!skb)
1632 goto out;
1633
1634 tt_response = (struct tt_query_packet *)skb->data;
1635 }
1636
Sven Eckelmann76543d12011-11-20 15:47:38 +01001637 tt_response->header.packet_type = BAT_TT_QUERY;
1638 tt_response->header.version = COMPAT_VERSION;
1639 tt_response->header.ttl = TTL;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001640 memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1641 memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1642 tt_response->flags = TT_RESPONSE;
1643
1644 if (full_table)
1645 tt_response->flags |= TT_FULL_TABLE;
1646
1647 bat_dbg(DBG_TT, bat_priv,
1648 "Sending TT_RESPONSE to %pM via %pM [%c]\n",
1649 orig_node->orig, neigh_node->addr,
1650 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
1651
Martin Hundebøllf8214862012-04-20 17:02:45 +02001652 batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX);
1653
Sven Eckelmann9455e342012-05-12 02:09:37 +02001654 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001655 ret = true;
1656 goto out;
1657
1658unlock:
1659 spin_unlock_bh(&bat_priv->tt_buff_lock);
1660out:
1661 if (orig_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001662 batadv_orig_node_free_ref(orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001663 if (neigh_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001664 batadv_neigh_node_free_ref(neigh_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001665 if (primary_if)
1666 hardif_free_ref(primary_if);
1667 if (!ret)
1668 kfree_skb(skb);
1669 /* This packet was for me, so it doesn't need to be re-routed */
1670 return true;
1671}
1672
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001673bool batadv_send_tt_response(struct bat_priv *bat_priv,
1674 struct tt_query_packet *tt_request)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001675{
Simon Wunderlich20ff9d52012-01-22 20:00:23 +01001676 if (is_my_mac(tt_request->dst)) {
1677 /* don't answer backbone gws! */
Sven Eckelmann08adf152012-05-12 13:38:47 +02001678 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
Simon Wunderlich20ff9d52012-01-22 20:00:23 +01001679 return true;
1680
Antonio Quartullia73105b2011-04-27 14:27:44 +02001681 return send_my_tt_response(bat_priv, tt_request);
Simon Wunderlich20ff9d52012-01-22 20:00:23 +01001682 } else {
Antonio Quartullia73105b2011-04-27 14:27:44 +02001683 return send_other_tt_response(bat_priv, tt_request);
Simon Wunderlich20ff9d52012-01-22 20:00:23 +01001684 }
Antonio Quartullia73105b2011-04-27 14:27:44 +02001685}
1686
1687static void _tt_update_changes(struct bat_priv *bat_priv,
1688 struct orig_node *orig_node,
1689 struct tt_change *tt_change,
1690 uint16_t tt_num_changes, uint8_t ttvn)
1691{
1692 int i;
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001693 int is_wifi;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001694
1695 for (i = 0; i < tt_num_changes; i++) {
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001696 if ((tt_change + i)->flags & TT_CLIENT_DEL) {
Antonio Quartullia73105b2011-04-27 14:27:44 +02001697 tt_global_del(bat_priv, orig_node,
1698 (tt_change + i)->addr,
Antonio Quartullicc47f662011-04-27 14:27:57 +02001699 "tt removed by changes",
1700 (tt_change + i)->flags & TT_CLIENT_ROAM);
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001701 } else {
1702 is_wifi = (tt_change + i)->flags & TT_CLIENT_WIFI;
1703 if (!batadv_tt_global_add(bat_priv, orig_node,
1704 (tt_change + i)->addr, ttvn,
1705 false, is_wifi))
Antonio Quartullia73105b2011-04-27 14:27:44 +02001706				/* If a problem occurs while storing a
1707				 * global_entry, we stop the updating
1708				 * procedure without committing the
1709				 * ttvn change. This avoids sending
1710				 * corrupted data in reply to a later tt_request
1711 */
1712 return;
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001713 }
Antonio Quartullia73105b2011-04-27 14:27:44 +02001714 }
Antonio Quartulli17071572011-11-07 16:36:40 +01001715 orig_node->tt_initialised = true;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001716}
1717
1718static void tt_fill_gtable(struct bat_priv *bat_priv,
1719 struct tt_query_packet *tt_response)
1720{
1721 struct orig_node *orig_node = NULL;
1722
1723 orig_node = orig_hash_find(bat_priv, tt_response->src);
1724 if (!orig_node)
1725 goto out;
1726
1727 /* Purge the old table first.. */
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001728 batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
Antonio Quartullia73105b2011-04-27 14:27:44 +02001729
1730 _tt_update_changes(bat_priv, orig_node,
1731 (struct tt_change *)(tt_response + 1),
Al Virof25bd582012-04-22 07:44:27 +01001732 ntohs(tt_response->tt_data), tt_response->ttvn);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001733
1734 spin_lock_bh(&orig_node->tt_buff_lock);
1735 kfree(orig_node->tt_buff);
1736 orig_node->tt_buff_len = 0;
1737 orig_node->tt_buff = NULL;
1738 spin_unlock_bh(&orig_node->tt_buff_lock);
1739
1740 atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
1741
1742out:
1743 if (orig_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001744 batadv_orig_node_free_ref(orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001745}
1746
Marek Lindnera943cac2011-07-30 13:10:18 +02001747static void tt_update_changes(struct bat_priv *bat_priv,
1748 struct orig_node *orig_node,
1749 uint16_t tt_num_changes, uint8_t ttvn,
1750 struct tt_change *tt_change)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001751{
1752 _tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
1753 ttvn);
1754
1755 tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
1756 tt_num_changes);
1757 atomic_set(&orig_node->last_ttvn, ttvn);
1758}
1759
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001760bool batadv_is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001761{
Antonio Quartulli7683fdc2011-04-27 14:28:07 +02001762 struct tt_local_entry *tt_local_entry = NULL;
1763 bool ret = false;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001764
Antonio Quartullia73105b2011-04-27 14:27:44 +02001765 tt_local_entry = tt_local_hash_find(bat_priv, addr);
Antonio Quartulli7683fdc2011-04-27 14:28:07 +02001766 if (!tt_local_entry)
1767 goto out;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02001768 /* Check if the client has been logically deleted (but is kept for
1769	 * consistency purposes) */
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001770 if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
Antonio Quartulli058d0e22011-07-07 01:40:58 +02001771 goto out;
Antonio Quartulli7683fdc2011-04-27 14:28:07 +02001772 ret = true;
1773out:
Antonio Quartullia73105b2011-04-27 14:27:44 +02001774 if (tt_local_entry)
Antonio Quartulli7683fdc2011-04-27 14:28:07 +02001775 tt_local_entry_free_ref(tt_local_entry);
1776 return ret;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001777}
1778
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001779void batadv_handle_tt_response(struct bat_priv *bat_priv,
1780 struct tt_query_packet *tt_response)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001781{
1782 struct tt_req_node *node, *safe;
1783 struct orig_node *orig_node = NULL;
1784
Sven Eckelmann86ceb362012-03-07 09:07:45 +01001785 bat_dbg(DBG_TT, bat_priv,
1786 "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
Al Virof25bd582012-04-22 07:44:27 +01001787 tt_response->src, tt_response->ttvn,
1788 ntohs(tt_response->tt_data),
Antonio Quartullia73105b2011-04-27 14:27:44 +02001789 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
1790
Simon Wunderlich20ff9d52012-01-22 20:00:23 +01001791	/* we should never have asked a backbone gw */
Sven Eckelmann08adf152012-05-12 13:38:47 +02001792 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
Simon Wunderlich20ff9d52012-01-22 20:00:23 +01001793 goto out;
1794
Antonio Quartullia73105b2011-04-27 14:27:44 +02001795 orig_node = orig_hash_find(bat_priv, tt_response->src);
1796 if (!orig_node)
1797 goto out;
1798
1799 if (tt_response->flags & TT_FULL_TABLE)
1800 tt_fill_gtable(bat_priv, tt_response);
1801 else
Al Virof25bd582012-04-22 07:44:27 +01001802 tt_update_changes(bat_priv, orig_node,
1803 ntohs(tt_response->tt_data),
Antonio Quartullia73105b2011-04-27 14:27:44 +02001804 tt_response->ttvn,
1805 (struct tt_change *)(tt_response + 1));
1806
1807	/* Delete the tt_req_node from the pending tt_requests list */
1808 spin_lock_bh(&bat_priv->tt_req_list_lock);
1809 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1810 if (!compare_eth(node->addr, tt_response->src))
1811 continue;
1812 list_del(&node->list);
1813 kfree(node);
1814 }
1815 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1816
1817 /* Recalculate the CRC for this orig_node and store it */
Antonio Quartullia73105b2011-04-27 14:27:44 +02001818 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
Antonio Quartullicc47f662011-04-27 14:27:57 +02001819 /* Roaming phase is over: tables are in sync again. I can
1820 * unset the flag */
1821 orig_node->tt_poss_change = false;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001822out:
1823 if (orig_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001824 batadv_orig_node_free_ref(orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001825}
1826
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001827int batadv_tt_init(struct bat_priv *bat_priv)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001828{
Sven Eckelmann5346c352012-05-05 13:27:28 +02001829 int ret;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001830
Sven Eckelmann5346c352012-05-05 13:27:28 +02001831 ret = tt_local_init(bat_priv);
1832 if (ret < 0)
1833 return ret;
1834
1835 ret = tt_global_init(bat_priv);
1836 if (ret < 0)
1837 return ret;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001838
1839 tt_start_timer(bat_priv);
1840
1841 return 1;
1842}
1843
Antonio Quartullicc47f662011-04-27 14:27:57 +02001844static void tt_roam_list_free(struct bat_priv *bat_priv)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001845{
Antonio Quartullicc47f662011-04-27 14:27:57 +02001846 struct tt_roam_node *node, *safe;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001847
Antonio Quartullicc47f662011-04-27 14:27:57 +02001848 spin_lock_bh(&bat_priv->tt_roam_list_lock);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001849
Antonio Quartullicc47f662011-04-27 14:27:57 +02001850 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1851 list_del(&node->list);
1852 kfree(node);
1853 }
1854
1855 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1856}
1857
1858static void tt_roam_purge(struct bat_priv *bat_priv)
1859{
1860 struct tt_roam_node *node, *safe;
1861
1862 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1863 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
Marek Lindner032b7962011-12-20 19:30:40 +08001864 if (!has_timed_out(node->first_time, ROAMING_MAX_TIME))
Antonio Quartullicc47f662011-04-27 14:27:57 +02001865 continue;
1866
1867 list_del(&node->list);
1868 kfree(node);
1869 }
1870 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1871}
1872
1873/* This function checks whether the client has already reached the
1874 * maximum number of possible roaming phases. If so, the ROAMING_ADV
1875 * will not be sent.
1876 *
1877 * returns true if the ROAMING_ADV can be sent, false otherwise */
1878static bool tt_check_roam_count(struct bat_priv *bat_priv,
1879 uint8_t *client)
1880{
1881 struct tt_roam_node *tt_roam_node;
1882 bool ret = false;
1883
1884 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1885	/* A new roaming advertisement is sent only if this client has not
1886	 * already used up its roaming budget within ROAMING_MAX_TIME */
1887 list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
1888 if (!compare_eth(tt_roam_node->addr, client))
1889 continue;
1890
Marek Lindner032b7962011-12-20 19:30:40 +08001891 if (has_timed_out(tt_roam_node->first_time, ROAMING_MAX_TIME))
Antonio Quartullicc47f662011-04-27 14:27:57 +02001892 continue;
1893
1894 if (!atomic_dec_not_zero(&tt_roam_node->counter))
1895 /* Sorry, you roamed too many times! */
1896 goto unlock;
1897 ret = true;
1898 break;
1899 }
1900
1901 if (!ret) {
1902 tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
1903 if (!tt_roam_node)
1904 goto unlock;
1905
1906 tt_roam_node->first_time = jiffies;
1907 atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
1908 memcpy(tt_roam_node->addr, client, ETH_ALEN);
1909
1910 list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
1911 ret = true;
1912 }
1913
1914unlock:
1915 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1916 return ret;
1917}
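
/* A user-space sketch of the rate limiting idea implemented by
 * tt_check_roam_count() above: each client gets a small budget of roaming
 * advertisements which is only refilled once its window has expired. The
 * names and constants below are assumptions of this sketch, not the
 * batman-adv values.
 */
#include <stdbool.h>
#include <time.h>

#define EXAMPLE_ROAM_MAX_COUNT	5
#define EXAMPLE_ROAM_MAX_TIME	20	/* seconds */

struct example_roam_state {
	time_t first_time;	/* start of the current roaming window */
	int counter;		/* advertisements left in this window */
};

static bool example_roam_allowed(struct example_roam_state *s, time_t now)
{
	if (now - s->first_time >= EXAMPLE_ROAM_MAX_TIME) {
		/* window expired (or never used): open a new one and
		 * spend the first token right away
		 */
		s->first_time = now;
		s->counter = EXAMPLE_ROAM_MAX_COUNT - 1;
		return true;
	}

	if (s->counter > 0) {
		s->counter--;	/* still within the budget */
		return true;
	}

	return false;		/* sorry, roamed too many times */
}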
1918
Sven Eckelmannde7aae62012-02-05 18:55:22 +01001919static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
1920 struct orig_node *orig_node)
Antonio Quartullicc47f662011-04-27 14:27:57 +02001921{
1922 struct neigh_node *neigh_node = NULL;
1923 struct sk_buff *skb = NULL;
1924 struct roam_adv_packet *roam_adv_packet;
1925 int ret = 1;
1926 struct hard_iface *primary_if;
1927
1928 /* before going on we have to check whether the client has
1929 * already roamed to us too many times */
1930 if (!tt_check_roam_count(bat_priv, client))
1931 goto out;
1932
1933 skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
1934 if (!skb)
1935 goto out;
1936
1937 skb_reserve(skb, ETH_HLEN);
1938
1939 roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
1940 sizeof(struct roam_adv_packet));
1941
Sven Eckelmann76543d12011-11-20 15:47:38 +01001942 roam_adv_packet->header.packet_type = BAT_ROAM_ADV;
1943 roam_adv_packet->header.version = COMPAT_VERSION;
1944 roam_adv_packet->header.ttl = TTL;
Antonio Quartullicc47f662011-04-27 14:27:57 +02001945 primary_if = primary_if_get_selected(bat_priv);
1946 if (!primary_if)
1947 goto out;
1948 memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1949 hardif_free_ref(primary_if);
1950 memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
1951 memcpy(roam_adv_packet->client, client, ETH_ALEN);
1952
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001953 neigh_node = batadv_orig_node_get_router(orig_node);
Antonio Quartullicc47f662011-04-27 14:27:57 +02001954 if (!neigh_node)
1955 goto out;
1956
1957 bat_dbg(DBG_TT, bat_priv,
1958 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
1959 orig_node->orig, client, neigh_node->addr);
1960
Martin Hundebøllf8214862012-04-20 17:02:45 +02001961 batadv_inc_counter(bat_priv, BAT_CNT_TT_ROAM_ADV_TX);
1962
Sven Eckelmann9455e342012-05-12 02:09:37 +02001963 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
Antonio Quartullicc47f662011-04-27 14:27:57 +02001964 ret = 0;
1965
1966out:
1967 if (neigh_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001968 batadv_neigh_node_free_ref(neigh_node);
Antonio Quartullicc47f662011-04-27 14:27:57 +02001969 if (ret)
1970 kfree_skb(skb);
1971 return;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001972}
1973
1974static void tt_purge(struct work_struct *work)
1975{
1976 struct delayed_work *delayed_work =
1977 container_of(work, struct delayed_work, work);
1978 struct bat_priv *bat_priv =
1979 container_of(delayed_work, struct bat_priv, tt_work);
1980
1981 tt_local_purge(bat_priv);
Antonio Quartullicc47f662011-04-27 14:27:57 +02001982 tt_global_roam_purge(bat_priv);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001983 tt_req_purge(bat_priv);
Antonio Quartullicc47f662011-04-27 14:27:57 +02001984 tt_roam_purge(bat_priv);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001985
1986 tt_start_timer(bat_priv);
1987}
Antonio Quartullicc47f662011-04-27 14:27:57 +02001988
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001989void batadv_tt_free(struct bat_priv *bat_priv)
Antonio Quartullicc47f662011-04-27 14:27:57 +02001990{
1991 cancel_delayed_work_sync(&bat_priv->tt_work);
1992
1993 tt_local_table_free(bat_priv);
1994 tt_global_table_free(bat_priv);
1995 tt_req_list_free(bat_priv);
1996 tt_changes_list_free(bat_priv);
1997 tt_roam_list_free(bat_priv);
1998
1999 kfree(bat_priv->tt_buff);
2000}
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002001
Antonio Quartulli697f2532011-11-07 16:47:01 +01002002/* This function enables or disables the specified flags for all the entries
2003 * in the given hash table and returns the number of modified entries */
2004static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
2005 bool enable)
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002006{
Antonio Quartullic90681b2011-10-05 17:05:25 +02002007 uint32_t i;
Antonio Quartulli697f2532011-11-07 16:47:01 +01002008 uint16_t changed_num = 0;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002009 struct hlist_head *head;
2010 struct hlist_node *node;
Antonio Quartulli48100ba2011-10-30 12:17:33 +01002011 struct tt_common_entry *tt_common_entry;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002012
2013 if (!hash)
Antonio Quartulli697f2532011-11-07 16:47:01 +01002014 goto out;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002015
2016 for (i = 0; i < hash->size; i++) {
2017 head = &hash->table[i];
2018
2019 rcu_read_lock();
Antonio Quartulli48100ba2011-10-30 12:17:33 +01002020 hlist_for_each_entry_rcu(tt_common_entry, node,
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002021 head, hash_entry) {
Antonio Quartulli697f2532011-11-07 16:47:01 +01002022 if (enable) {
2023 if ((tt_common_entry->flags & flags) == flags)
2024 continue;
2025 tt_common_entry->flags |= flags;
2026 } else {
2027 if (!(tt_common_entry->flags & flags))
2028 continue;
2029 tt_common_entry->flags &= ~flags;
2030 }
2031 changed_num++;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002032 }
2033 rcu_read_unlock();
2034 }
Antonio Quartulli697f2532011-11-07 16:47:01 +01002035out:
2036 return changed_num;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002037}
2038
2039/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
2040static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
2041{
2042 struct hashtable_t *hash = bat_priv->tt_local_hash;
Antonio Quartulli48100ba2011-10-30 12:17:33 +01002043 struct tt_common_entry *tt_common_entry;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002044 struct tt_local_entry *tt_local_entry;
2045 struct hlist_node *node, *node_tmp;
2046 struct hlist_head *head;
2047 spinlock_t *list_lock; /* protects write access to the hash lists */
Antonio Quartullic90681b2011-10-05 17:05:25 +02002048 uint32_t i;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002049
2050 if (!hash)
2051 return;
2052
2053 for (i = 0; i < hash->size; i++) {
2054 head = &hash->table[i];
2055 list_lock = &hash->list_locks[i];
2056
2057 spin_lock_bh(list_lock);
Antonio Quartulli48100ba2011-10-30 12:17:33 +01002058 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002059 head, hash_entry) {
Antonio Quartulli48100ba2011-10-30 12:17:33 +01002060 if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002061 continue;
2062
Sven Eckelmann86ceb362012-03-07 09:07:45 +01002063 bat_dbg(DBG_TT, bat_priv,
2064 "Deleting local tt entry (%pM): pending\n",
2065 tt_common_entry->addr);
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002066
2067 atomic_dec(&bat_priv->num_local_tt);
2068 hlist_del_rcu(node);
Antonio Quartulli48100ba2011-10-30 12:17:33 +01002069 tt_local_entry = container_of(tt_common_entry,
2070 struct tt_local_entry,
2071 common);
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002072 tt_local_entry_free_ref(tt_local_entry);
2073 }
2074 spin_unlock_bh(list_lock);
2075 }
2076
2077}
2078
Marek Lindnerbe9aa4c2012-05-07 04:22:05 +08002079static int tt_commit_changes(struct bat_priv *bat_priv,
2080 unsigned char **packet_buff, int *packet_buff_len,
2081 int packet_min_len)
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002082{
Marek Lindnerbe9aa4c2012-05-07 04:22:05 +08002083 uint16_t changed_num = 0;
2084
2085 if (atomic_read(&bat_priv->tt_local_changes) < 1)
2086 return -ENOENT;
2087
2088 changed_num = tt_set_flags(bat_priv->tt_local_hash,
2089 TT_CLIENT_NEW, false);
2090
2091 /* all reset entries have to be counted as local entries */
Antonio Quartulli697f2532011-11-07 16:47:01 +01002092 atomic_add(changed_num, &bat_priv->num_local_tt);
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002093 tt_local_purge_pending_clients(bat_priv);
Marek Lindnerbe9aa4c2012-05-07 04:22:05 +08002094 bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002095
2096 /* Increment the TTVN only once per OGM interval */
2097 atomic_inc(&bat_priv->ttvn);
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +02002098 bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n",
2099 (uint8_t)atomic_read(&bat_priv->ttvn));
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002100 bat_priv->tt_poss_change = false;
Marek Lindnerbe9aa4c2012-05-07 04:22:05 +08002101
2102 /* reset the sending counter */
2103 atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
2104
2105 return tt_changes_fill_buff(bat_priv, packet_buff,
2106 packet_buff_len, packet_min_len);
2107}
2108
2109/* when calling this function, (hard_iface == primary_if) has to be true */
2110int batadv_tt_append_diff(struct bat_priv *bat_priv,
2111 unsigned char **packet_buff, int *packet_buff_len,
2112 int packet_min_len)
2113{
2114 int tt_num_changes;
2115
2116 /* if at least one change happened */
2117 tt_num_changes = tt_commit_changes(bat_priv, packet_buff,
2118 packet_buff_len, packet_min_len);
2119
2120 /* if the changes have been sent often enough */
2121 if ((tt_num_changes < 0) &&
2122 (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
2123 tt_realloc_packet_buff(packet_buff, packet_buff_len,
2124 packet_min_len, packet_min_len);
2125 tt_num_changes = 0;
2126 }
2127
2128 return tt_num_changes;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002129}
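
/* A sketch of the bounded-repeat idea behind tt_ogm_append_cnt in the two
 * functions above: a freshly committed diff reloads the counter, and once
 * the counter is used up the stale diff is no longer appended to outgoing
 * OGMs. The names and the constant are assumptions of this sketch.
 */
#include <stdbool.h>

#define EXAMPLE_APPEND_MAX	3

struct example_diff_state {
	int append_cnt;		/* how many more OGMs may carry the diff */
	bool have_new_diff;	/* set when local clients changed */
};

static bool example_append_diff(struct example_diff_state *s)
{
	if (s->have_new_diff) {
		s->append_cnt = EXAMPLE_APPEND_MAX;
		s->have_new_diff = false;
		return true;	/* append the new diff */
	}

	if (s->append_cnt > 0) {
		s->append_cnt--;
		return true;	/* repeat the previous diff once more */
	}

	return false;		/* diff sent often enough, drop it */
}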
Antonio Quartulli59b699c2011-07-07 15:35:36 +02002130
Sven Eckelmann08c36d32012-05-12 02:09:39 +02002131bool batadv_is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src,
2132 uint8_t *dst)
Antonio Quartulli59b699c2011-07-07 15:35:36 +02002133{
2134 struct tt_local_entry *tt_local_entry = NULL;
2135 struct tt_global_entry *tt_global_entry = NULL;
2136 bool ret = true;
2137
2138 if (!atomic_read(&bat_priv->ap_isolation))
2139 return false;
2140
2141 tt_local_entry = tt_local_hash_find(bat_priv, dst);
2142 if (!tt_local_entry)
2143 goto out;
2144
2145 tt_global_entry = tt_global_hash_find(bat_priv, src);
2146 if (!tt_global_entry)
2147 goto out;
2148
2149 if (_is_ap_isolated(tt_local_entry, tt_global_entry))
2150 goto out;
2151
2152 ret = false;
2153
2154out:
2155 if (tt_global_entry)
2156 tt_global_entry_free_ref(tt_global_entry);
2157 if (tt_local_entry)
2158 tt_local_entry_free_ref(tt_local_entry);
2159 return ret;
2160}
Marek Lindnera943cac2011-07-30 13:10:18 +02002161
Sven Eckelmann08c36d32012-05-12 02:09:39 +02002162void batadv_tt_update_orig(struct bat_priv *bat_priv,
2163 struct orig_node *orig_node,
2164 const unsigned char *tt_buff, uint8_t tt_num_changes,
2165 uint8_t ttvn, uint16_t tt_crc)
Marek Lindnera943cac2011-07-30 13:10:18 +02002166{
2167 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
2168 bool full_table = true;
2169
Simon Wunderlich20ff9d52012-01-22 20:00:23 +01002170	/* don't care about a backbone gateway's updates. */
Sven Eckelmann08adf152012-05-12 13:38:47 +02002171 if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
Simon Wunderlich20ff9d52012-01-22 20:00:23 +01002172 return;
2173
Antonio Quartulli17071572011-11-07 16:36:40 +01002174 /* orig table not initialised AND first diff is in the OGM OR the ttvn
2175 * increased by one -> we can apply the attached changes */
2176 if ((!orig_node->tt_initialised && ttvn == 1) ||
2177 ttvn - orig_ttvn == 1) {
Marek Lindnera943cac2011-07-30 13:10:18 +02002178		/* the OGM could not carry the changes because of their size or
2179		 * because they have already been sent TT_OGM_APPEND_MAX times.
2180		 * In this case, send a tt request */
2181 if (!tt_num_changes) {
2182 full_table = false;
2183 goto request_table;
2184 }
2185
2186 tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
2187 (struct tt_change *)tt_buff);
2188
2189 /* Even if we received the precomputed crc with the OGM, we
2190 * prefer to recompute it to spot any possible inconsistency
2191 * in the global table */
2192 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
2193
2194 /* The ttvn alone is not enough to guarantee consistency
2195 * because a single value could represent different states
2196 * (due to the wrap around). Thus a node has to check whether
2197 * the resulting table (after applying the changes) is still
2198 * consistent or not. E.g. a node could disconnect while its
2199 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
2200 * checking the CRC value is mandatory to detect the
2201 * inconsistency */
2202 if (orig_node->tt_crc != tt_crc)
2203 goto request_table;
2204
2205 /* Roaming phase is over: tables are in sync again. I can
2206 * unset the flag */
2207 orig_node->tt_poss_change = false;
2208 } else {
2209 /* if we missed more than one change or our tables are not
2210 * in sync anymore -> request fresh tt data */
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +02002211
Antonio Quartulli17071572011-11-07 16:36:40 +01002212 if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
2213 orig_node->tt_crc != tt_crc) {
Marek Lindnera943cac2011-07-30 13:10:18 +02002214request_table:
Sven Eckelmann86ceb362012-03-07 09:07:45 +01002215 bat_dbg(DBG_TT, bat_priv,
2216 "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
2217 orig_node->orig, ttvn, orig_ttvn, tt_crc,
2218 orig_node->tt_crc, tt_num_changes);
Marek Lindnera943cac2011-07-30 13:10:18 +02002219 send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
2220 full_table);
2221 return;
2222 }
2223 }
2224}
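
/* A minimal sketch of the consistency rule documented above: the table
 * version number is an 8-bit value, so after enough updates it wraps
 * around and the same ttvn can describe different table states. A peer is
 * therefore only considered in sync when both the ttvn and the table CRC
 * match. The function name is an assumption made only for this sketch.
 */
#include <stdint.h>
#include <stdbool.h>

static bool example_tt_in_sync(uint8_t local_ttvn, uint8_t remote_ttvn,
			       uint16_t local_crc, uint16_t remote_crc)
{
	/* the version number alone is ambiguous after a wrap around */
	return local_ttvn == remote_ttvn && local_crc == remote_crc;
}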
Antonio Quartulli3275e7c2012-03-16 18:03:28 +01002225
2226/* returns true whether we know that the client has moved from its old
2227 * originator to another one. This entry is kept is still kept for consistency
2228 * purposes
2229 */
Sven Eckelmann08c36d32012-05-12 02:09:39 +02002230bool batadv_tt_global_client_is_roaming(struct bat_priv *bat_priv,
2231 uint8_t *addr)
Antonio Quartulli3275e7c2012-03-16 18:03:28 +01002232{
2233 struct tt_global_entry *tt_global_entry;
2234 bool ret = false;
2235
2236 tt_global_entry = tt_global_hash_find(bat_priv, addr);
2237 if (!tt_global_entry)
2238 goto out;
2239
2240 ret = tt_global_entry->common.flags & TT_CLIENT_ROAM;
2241 tt_global_entry_free_ref(tt_global_entry);
2242out:
2243 return ret;
2244}