/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "send.h"
#include "hash.h"
#include "originator.h"
#include "routing.h"
#include "bridge_loop_avoidance.h"

#include <linux/crc16.h>

static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
			  struct orig_node *orig_node);
static void tt_purge(struct work_struct *work);
static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);

/* returns 1 if they are the same mac addr */
static int compare_tt(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct tt_common_entry,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

static void tt_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
	queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work,
			   msecs_to_jiffies(5000));
}

static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
					    const void *data)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
	uint32_t index;

	if (!hash)
		return NULL;

	index = choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
		if (!compare_eth(tt_common_entry, data))
			continue;

		if (!atomic_inc_not_zero(&tt_common_entry->refcount))
			continue;

		tt_common_entry_tmp = tt_common_entry;
		break;
	}
	rcu_read_unlock();

	return tt_common_entry_tmp;
}

static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
						 const void *data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry = NULL;

	tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
	if (tt_common_entry)
		tt_local_entry = container_of(tt_common_entry,
					      struct tt_local_entry, common);
	return tt_local_entry;
}

static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
						   const void *data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry = NULL;

	tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
	if (tt_common_entry)
		tt_global_entry = container_of(tt_common_entry,
					       struct tt_global_entry, common);
	return tt_global_entry;
}

static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
{
	if (atomic_dec_and_test(&tt_local_entry->common.refcount))
		kfree_rcu(tt_local_entry, common.rcu);
}

static void tt_global_entry_free_rcu(struct rcu_head *rcu)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;

	tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
				       common);

	kfree(tt_global_entry);
}

static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
{
	if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
		tt_global_del_orig_list(tt_global_entry);
		call_rcu(&tt_global_entry->common.rcu,
			 tt_global_entry_free_rcu);
	}
}

static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
{
	struct tt_orig_list_entry *orig_entry;

	orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
	atomic_dec(&orig_entry->orig_node->tt_size);
	batadv_orig_node_free_ref(orig_entry->orig_node);
	kfree(orig_entry);
}

static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
{
	call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
}

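/* queue a single translation table change (client address plus flags) so that
 * it gets appended to the following OGMs; resetting tt_ogm_append_cnt restarts
 * the re-broadcast counting for the pending change set
 */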
static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
			   uint8_t flags)
{
	struct tt_change_node *tt_change_node;

	tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);

	if (!tt_change_node)
		return;

	tt_change_node->change.flags = flags;
	memcpy(tt_change_node->change.addr, addr, ETH_ALEN);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	/* track the change in the OGM interval list */
	list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
	atomic_inc(&bat_priv->tt_local_changes);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
}

int batadv_tt_len(int changes_num)
{
	return changes_num * sizeof(struct tt_change);
}

static int tt_local_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_local_hash)
		return 0;

	bat_priv->tt_local_hash = batadv_hash_new(1024);

	if (!bat_priv->tt_local_hash)
		return -ENOMEM;

	return 0;
}

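/* register a client (MAC address seen on the soft interface) in the local
 * translation table; if the client was previously announced by other
 * originators, this is treated as a roaming event. The new entry starts with a
 * refcount of 2: one reference is owned by the hash table, the other is the
 * one taken here and released at the out: label.
 */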
void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
			 int ifindex)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;
	int hash_added;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);

	if (tt_local_entry) {
		tt_local_entry->last_seen = jiffies;
		/* possibly unset the TT_CLIENT_PENDING flag */
		tt_local_entry->common.flags &= ~TT_CLIENT_PENDING;
		goto out;
	}

	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
		(uint8_t)atomic_read(&bat_priv->ttvn));

	memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
	tt_local_entry->common.flags = NO_FLAGS;
	if (batadv_is_wifi_iface(ifindex))
		tt_local_entry->common.flags |= TT_CLIENT_WIFI;
	atomic_set(&tt_local_entry->common.refcount, 2);
	tt_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;

	/* The local entry has to be marked as NEW to avoid sending it in
	 * a full table response going out before the next ttvn increment
	 * (consistency check)
	 */
	tt_local_entry->common.flags |= TT_CLIENT_NEW;

	hash_added = batadv_hash_add(bat_priv->tt_local_hash, compare_tt,
				     choose_orig, &tt_local_entry->common,
				     &tt_local_entry->common.hash_entry);

	if (unlikely(hash_added != 0)) {
		/* remove the reference for the hash */
		tt_local_entry_free_ref(tt_local_entry);
		goto out;
	}

	tt_local_event(bat_priv, addr, tt_local_entry->common.flags);

	/* remove address from global hash if present */
	tt_global_entry = tt_global_hash_find(bat_priv, addr);

	/* Check whether this is a roaming event */
	if (tt_global_entry) {
		/* These nodes are probably going to update their tt table */
		head = &tt_global_entry->orig_list;
		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_entry, node, head, list) {
			orig_entry->orig_node->tt_poss_change = true;

			send_roam_adv(bat_priv, tt_global_entry->common.addr,
				      orig_entry->orig_node);
		}
		rcu_read_unlock();
		/* The global entry has to be marked as ROAMING and
		 * has to be kept for consistency purposes
		 */
		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
		tt_global_entry->roam_at = jiffies;
	}
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}

static void tt_realloc_packet_buff(unsigned char **packet_buff,
				   int *packet_buff_len, int min_packet_len,
				   int new_packet_len)
{
	unsigned char *new_buff;

	new_buff = kmalloc(new_packet_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, *packet_buff, min_packet_len);
		kfree(*packet_buff);
		*packet_buff = new_buff;
		*packet_buff_len = new_packet_len;
	}
}

static void tt_prepare_packet_buff(struct bat_priv *bat_priv,
				   unsigned char **packet_buff,
				   int *packet_buff_len, int min_packet_len)
{
	struct hard_iface *primary_if;
	int req_len;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	req_len = min_packet_len;
	req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes));

	/* if we have too many changes for one packet don't send any
	 * and wait for the tt table request which will be fragmented
	 */
	if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
		req_len = min_packet_len;

	tt_realloc_packet_buff(packet_buff, packet_buff_len,
			       min_packet_len, req_len);

	if (primary_if)
		batadv_hardif_free_ref(primary_if);
}

static int tt_changes_fill_buff(struct bat_priv *bat_priv,
				unsigned char **packet_buff,
				int *packet_buff_len, int min_packet_len)
{
	struct tt_change_node *entry, *safe;
	int count = 0, tot_changes = 0, new_len;
	unsigned char *tt_buff;

	tt_prepare_packet_buff(bat_priv, packet_buff,
			       packet_buff_len, min_packet_len);

	new_len = *packet_buff_len - min_packet_len;
	tt_buff = *packet_buff + min_packet_len;

	if (new_len > 0)
		tot_changes = new_len / batadv_tt_len(1);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	atomic_set(&bat_priv->tt_local_changes, 0);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		if (count < tot_changes) {
			memcpy(tt_buff + batadv_tt_len(count),
			       &entry->change, sizeof(struct tt_change));
			count++;
		}
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	/* Keep the buffer for possible tt_request */
	spin_lock_bh(&bat_priv->tt_buff_lock);
	kfree(bat_priv->tt_buff);
	bat_priv->tt_buff_len = 0;
	bat_priv->tt_buff = NULL;
	/* check whether this new OGM has no changes due to size problems */
	if (new_len > 0) {
		/* if kmalloc() fails we will reply with the full table
		 * instead of providing the diff
		 */
		bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
		if (bat_priv->tt_buff) {
			memcpy(bat_priv->tt_buff, tt_buff, new_len);
			bat_priv->tt_buff_len = new_len;
		}
	}
	spin_unlock_bh(&bat_priv->tt_buff_lock);

	return count;
}

int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
		   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
				   tt_common_entry->addr,
				   (tt_common_entry->flags &
				    TT_CLIENT_ROAM ? 'R' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_NOPURGE ? 'P' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_NEW ? 'N' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_PENDING ? 'X' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_WIFI ? 'W' : '.'));
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}

static void tt_local_set_pending(struct bat_priv *bat_priv,
				 struct tt_local_entry *tt_local_entry,
				 uint16_t flags, const char *message)
{
	tt_local_event(bat_priv, tt_local_entry->common.addr,
		       tt_local_entry->common.flags | flags);

	/* The local client has to be marked as "pending to be removed" but has
	 * to be kept in the table in order to send it in a full table
	 * response issued before the next ttvn increment (consistency check)
	 */
	tt_local_entry->common.flags |= TT_CLIENT_PENDING;

	bat_dbg(DBG_TT, bat_priv,
		"Local tt entry (%pM) pending to be removed: %s\n",
		tt_local_entry->common.addr, message);
}

void batadv_tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
			    const char *message, bool roaming)
{
	struct tt_local_entry *tt_local_entry = NULL;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		goto out;

	tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
			     (roaming ? TT_CLIENT_ROAM : NO_FLAGS), message);
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
}

static void tt_local_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct tt_common_entry *tt_common_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
				continue;

			/* entry already marked for deletion */
			if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
				continue;

			if (!has_timed_out(tt_local_entry->last_seen,
					   TT_LOCAL_TIMEOUT))
				continue;

			tt_local_set_pending(bat_priv, tt_local_entry,
					     TT_CLIENT_DEL, "timed out");
		}
		spin_unlock_bh(list_lock);
	}
}

static void tt_local_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt_local_hash)
		return;

	hash = bat_priv->tt_local_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);

	bat_priv->tt_local_hash = NULL;
}

static int tt_global_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_global_hash)
		return 0;

	bat_priv->tt_global_hash = batadv_hash_new(1024);

	if (!bat_priv->tt_global_hash)
		return -ENOMEM;

	return 0;
}

static void tt_changes_list_free(struct bat_priv *bat_priv)
{
	struct tt_change_node *entry, *safe;

	spin_lock_bh(&bat_priv->tt_changes_list_lock);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		list_del(&entry->list);
		kfree(entry);
	}

	atomic_set(&bat_priv->tt_local_changes, 0);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
}

/* find out if an orig_node is already in the list of a tt_global_entry.
 * returns true if found, false otherwise
 */
static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
				     const struct orig_node *orig_node)
{
	struct tt_orig_list_entry *tmp_orig_entry;
	const struct hlist_head *head;
	struct hlist_node *node;
	bool found = false;

	rcu_read_lock();
	head = &entry->orig_list;
	hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
		if (tmp_orig_entry->orig_node == orig_node) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
				     struct orig_node *orig_node,
				     int ttvn)
{
	struct tt_orig_list_entry *orig_entry;

	orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
	if (!orig_entry)
		return;

	INIT_HLIST_NODE(&orig_entry->list);
	atomic_inc(&orig_node->refcount);
	atomic_inc(&orig_node->tt_size);
	orig_entry->orig_node = orig_node;
	orig_entry->ttvn = ttvn;

	spin_lock_bh(&tt_global_entry->list_lock);
	hlist_add_head_rcu(&orig_entry->list,
			   &tt_global_entry->orig_list);
	spin_unlock_bh(&tt_global_entry->list_lock);
}

/* caller must hold orig_node refcount */
int batadv_tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
			 const unsigned char *tt_addr, uint8_t ttvn,
			 bool roaming, bool wifi)
{
	struct tt_global_entry *tt_global_entry = NULL;
	int ret = 0;
	int hash_added;
	struct tt_common_entry *common;

	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);

	if (!tt_global_entry) {
		tt_global_entry = kzalloc(sizeof(*tt_global_entry),
					  GFP_ATOMIC);
		if (!tt_global_entry)
			goto out;

		common = &tt_global_entry->common;
		memcpy(common->addr, tt_addr, ETH_ALEN);

		common->flags = NO_FLAGS;
		tt_global_entry->roam_at = 0;
		atomic_set(&common->refcount, 2);

		INIT_HLIST_HEAD(&tt_global_entry->orig_list);
		spin_lock_init(&tt_global_entry->list_lock);

		hash_added = batadv_hash_add(bat_priv->tt_global_hash,
					     compare_tt, choose_orig,
					     common, &common->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* remove the reference for the hash */
			tt_global_entry_free_ref(tt_global_entry);
			goto out_remove;
		}

		tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn);
	} else {
		/* there is already a global entry, use this one. */

		/* If there is the TT_CLIENT_ROAM flag set, there is only one
		 * originator left in the list and we previously received a
		 * delete + roaming change for this originator.
		 *
		 * We should first delete the old originator before adding the
		 * new one.
		 */
		if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
			tt_global_del_orig_list(tt_global_entry);
			tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
			tt_global_entry->roam_at = 0;
		}

		if (!tt_global_entry_has_orig(tt_global_entry, orig_node))
			tt_global_add_orig_entry(tt_global_entry, orig_node,
						 ttvn);
	}

	if (wifi)
		tt_global_entry->common.flags |= TT_CLIENT_WIFI;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new global tt entry: %pM (via %pM)\n",
		tt_global_entry->common.addr, orig_node->orig);

out_remove:
	/* remove address from local hash if present */
	batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr,
			       "global tt received", roaming);
	ret = 1;
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	return ret;
}

/* print all orig nodes that announce the address for this global entry.
 * it is assumed that the caller holds rcu_read_lock();
 */
static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
				  struct seq_file *seq)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;
	struct tt_common_entry *tt_common_entry;
	uint16_t flags;
	uint8_t last_ttvn;

	tt_common_entry = &tt_global_entry->common;

	head = &tt_global_entry->orig_list;

	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		flags = tt_common_entry->flags;
		last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
		seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n",
			   tt_global_entry->common.addr, orig_entry->ttvn,
			   orig_entry->orig_node->orig, last_ttvn,
			   (flags & TT_CLIENT_ROAM ? 'R' : '.'),
			   (flags & TT_CLIENT_WIFI ? 'W' : '.'));
	}
}

int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);
	seq_printf(seq, " %-13s %s %-15s %s %s\n",
		   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			tt_global_print_entry(tt_global_entry, seq);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}

/* deletes the orig list of a tt_global_entry */
static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
{
	struct hlist_head *head;
	struct hlist_node *node, *safe;
	struct tt_orig_list_entry *orig_entry;

	spin_lock_bh(&tt_global_entry->list_lock);
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
		hlist_del_rcu(node);
		tt_orig_list_entry_free_ref(orig_entry);
	}
	spin_unlock_bh(&tt_global_entry->list_lock);
}

static void tt_global_del_orig_entry(struct bat_priv *bat_priv,
				     struct tt_global_entry *tt_global_entry,
				     struct orig_node *orig_node,
				     const char *message)
{
	struct hlist_head *head;
	struct hlist_node *node, *safe;
	struct tt_orig_list_entry *orig_entry;

	spin_lock_bh(&tt_global_entry->list_lock);
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
		if (orig_entry->orig_node == orig_node) {
			bat_dbg(DBG_TT, bat_priv,
				"Deleting %pM from global tt entry %pM: %s\n",
				orig_node->orig, tt_global_entry->common.addr,
				message);
			hlist_del_rcu(node);
			tt_orig_list_entry_free_ref(orig_entry);
		}
	}
	spin_unlock_bh(&tt_global_entry->list_lock);
}

static void tt_global_del_struct(struct bat_priv *bat_priv,
				 struct tt_global_entry *tt_global_entry,
				 const char *message)
{
	bat_dbg(DBG_TT, bat_priv,
		"Deleting global tt entry %pM: %s\n",
		tt_global_entry->common.addr, message);

	batadv_hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
			   tt_global_entry->common.addr);
	tt_global_entry_free_ref(tt_global_entry);
}

/* If the client is to be deleted, we check if it is the last originator entry
 * within the tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and the
 * timer, otherwise we simply remove the originator scheduled for deletion.
 */
static void tt_global_del_roaming(struct bat_priv *bat_priv,
				  struct tt_global_entry *tt_global_entry,
				  struct orig_node *orig_node,
				  const char *message)
{
	bool last_entry = true;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;

	/* no local entry exists, case 1:
	 * Check if this is the last one or if other entries exist.
	 */

	rcu_read_lock();
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		if (orig_entry->orig_node != orig_node) {
			last_entry = false;
			break;
		}
	}
	rcu_read_unlock();

	if (last_entry) {
		/* it's the last one, mark it for roaming. */
		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
		tt_global_entry->roam_at = jiffies;
	} else
		/* there is another entry, we can simply delete this
		 * one and can still use the other one.
		 */
		tt_global_del_orig_entry(bat_priv, tt_global_entry,
					 orig_node, message);
}

static void tt_global_del(struct bat_priv *bat_priv,
			  struct orig_node *orig_node,
			  const unsigned char *addr,
			  const char *message, bool roaming)
{
	struct tt_global_entry *tt_global_entry = NULL;
	struct tt_local_entry *tt_local_entry = NULL;

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	if (!roaming) {
		tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node,
					 message);

		if (hlist_empty(&tt_global_entry->orig_list))
			tt_global_del_struct(bat_priv, tt_global_entry,
					     message);

		goto out;
	}

	/* if we are deleting a global entry due to a roam
	 * event, there are two possibilities:
	 * 1) the client roamed from node A to node B => if there
	 *    is only one originator left for this client, we mark
	 *    it with TT_CLIENT_ROAM, we start a timer and we
	 *    wait for node B to claim it. In case of timeout
	 *    the entry is purged.
	 *
	 *    If there are other originators left, we directly delete
	 *    the originator.
	 * 2) the client roamed to us => we can directly delete
	 *    the global entry, since it is useless now.
	 */
	tt_local_entry = tt_local_hash_find(bat_priv,
					    tt_global_entry->common.addr);
	if (tt_local_entry) {
		/* local entry exists, case 2: client roamed to us. */
		tt_global_del_orig_list(tt_global_entry);
		tt_global_del_struct(bat_priv, tt_global_entry, message);
	} else
		/* no local entry exists, case 1: check for roaming */
		tt_global_del_roaming(bat_priv, tt_global_entry, orig_node,
				      message);

out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
}

void batadv_tt_global_del_orig(struct bat_priv *bat_priv,
			       struct orig_node *orig_node, const char *message)
{
	struct tt_global_entry *tt_global_entry;
	struct tt_common_entry *tt_common_entry;
	uint32_t i;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct hlist_node *node, *safe;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, safe,
					  head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);

			tt_global_del_orig_entry(bat_priv, tt_global_entry,
						 orig_node, message);

			if (hlist_empty(&tt_global_entry->orig_list)) {
				bat_dbg(DBG_TT, bat_priv,
					"Deleting global tt entry %pM: %s\n",
					tt_global_entry->common.addr,
					message);
				hlist_del_rcu(node);
				tt_global_entry_free_ref(tt_global_entry);
			}
		}
		spin_unlock_bh(list_lock);
	}
	atomic_set(&orig_node->tt_size, 0);
	orig_node->tt_initialised = false;
}

static void tt_global_roam_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
				continue;
			if (!has_timed_out(tt_global_entry->roam_at,
					   TT_CLIENT_ROAM_TIMEOUT))
				continue;

			bat_dbg(DBG_TT, bat_priv,
				"Deleting global tt entry (%pM): Roaming timeout\n",
				tt_global_entry->common.addr);

			hlist_del_rcu(node);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}
}

static void tt_global_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt_global_hash)
		return;

	hash = bat_priv->tt_global_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);

	bat_priv->tt_global_hash = NULL;
}

static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
			    struct tt_global_entry *tt_global_entry)
{
	bool ret = false;

	if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
	    tt_global_entry->common.flags & TT_CLIENT_WIFI)
		ret = true;

	return ret;
}

struct orig_node *batadv_transtable_search(struct bat_priv *bat_priv,
					   const uint8_t *src,
					   const uint8_t *addr)
{
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;
	int best_tq;

	if (src && atomic_read(&bat_priv->ap_isolation)) {
		tt_local_entry = tt_local_hash_find(bat_priv, src);
		if (!tt_local_entry)
			goto out;
	}

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	/* check whether the clients should not communicate due to AP
	 * isolation
	 */
	if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
		goto out;

	best_tq = 0;

	rcu_read_lock();
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		router = batadv_orig_node_get_router(orig_entry->orig_node);
		if (!router)
			continue;

		if (router->tq_avg > best_tq) {
			orig_node = orig_entry->orig_node;
			best_tq = router->tq_avg;
		}
		batadv_neigh_node_free_ref(router);
	}
	/* found anything? */
	if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
		orig_node = NULL;
	rcu_read_unlock();
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);

	return orig_node;
}

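/* The table checksums below are built by computing a CRC16 over each client
 * MAC address individually and XORing the per-client values together, so the
 * result does not depend on the order in which the entries are visited.
 */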
1110/* Calculates the checksum of the local table of a given orig_node */
Sven Eckelmannde7aae62012-02-05 18:55:22 +01001111static uint16_t tt_global_crc(struct bat_priv *bat_priv,
1112 struct orig_node *orig_node)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001113{
1114 uint16_t total = 0, total_one;
1115 struct hashtable_t *hash = bat_priv->tt_global_hash;
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001116 struct tt_common_entry *tt_common_entry;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001117 struct tt_global_entry *tt_global_entry;
1118 struct hlist_node *node;
1119 struct hlist_head *head;
Antonio Quartullic90681b2011-10-05 17:05:25 +02001120 uint32_t i;
1121 int j;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001122
1123 for (i = 0; i < hash->size; i++) {
1124 head = &hash->table[i];
1125
1126 rcu_read_lock();
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001127 hlist_for_each_entry_rcu(tt_common_entry, node,
Antonio Quartullia73105b2011-04-27 14:27:44 +02001128 head, hash_entry) {
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001129 tt_global_entry = container_of(tt_common_entry,
1130 struct tt_global_entry,
1131 common);
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +02001132 /* Roaming clients are in the global table for
1133		 * consistency only. They must not be
1134 * taken into account while computing the
1135 * global crc
1136 */
1137 if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
1138 continue;
1139
1140 /* find out if this global entry is announced by this
1141 * originator
1142 */
1143 if (!tt_global_entry_has_orig(tt_global_entry,
1144 orig_node))
1145 continue;
1146
1147 total_one = 0;
1148 for (j = 0; j < ETH_ALEN; j++)
1149 total_one = crc16_byte(total_one,
1150 tt_global_entry->common.addr[j]);
1151 total ^= total_one;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001152 }
1153 rcu_read_unlock();
1154 }
1155
1156 return total;
1157}
1158
1159/* Calculates the checksum of the local table */
Marek Lindnerbe9aa4c2012-05-07 04:22:05 +08001160static uint16_t batadv_tt_local_crc(struct bat_priv *bat_priv)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001161{
1162 uint16_t total = 0, total_one;
1163 struct hashtable_t *hash = bat_priv->tt_local_hash;
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001164 struct tt_common_entry *tt_common_entry;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001165 struct hlist_node *node;
1166 struct hlist_head *head;
Antonio Quartullic90681b2011-10-05 17:05:25 +02001167 uint32_t i;
1168 int j;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001169
1170 for (i = 0; i < hash->size; i++) {
1171 head = &hash->table[i];
1172
1173 rcu_read_lock();
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001174 hlist_for_each_entry_rcu(tt_common_entry, node,
Antonio Quartullia73105b2011-04-27 14:27:44 +02001175 head, hash_entry) {
Antonio Quartulli058d0e22011-07-07 01:40:58 +02001176			/* clients not yet committed must not be taken into
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001177 * account while computing the CRC
1178 */
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001179 if (tt_common_entry->flags & TT_CLIENT_NEW)
Antonio Quartulli058d0e22011-07-07 01:40:58 +02001180 continue;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001181 total_one = 0;
1182 for (j = 0; j < ETH_ALEN; j++)
1183 total_one = crc16_byte(total_one,
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001184 tt_common_entry->addr[j]);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001185 total ^= total_one;
1186 }
Antonio Quartullia73105b2011-04-27 14:27:44 +02001187 rcu_read_unlock();
1188 }
1189
1190 return total;
1191}
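/* Editorial sketch, not part of this file: both CRC helpers above compute a
 * CRC16 over each client's 6-byte MAC address and XOR the per-entry results
 * into the table checksum. Because XOR is order independent, the hash bucket
 * iteration order does not affect the final value. The helper below is a
 * bitwise stand-in for the kernel's table-driven crc16_byte() (CRC-16/ARC,
 * reflected polynomial 0xA001); treat it as an illustration only.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t crc16_step(uint16_t crc, uint8_t data)
{
	int i;

	crc ^= data;
	for (i = 0; i < 8; i++)
		crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	return crc;
}

/* fold one 6-byte address into the running table checksum */
static uint16_t tt_crc_fold(uint16_t total, const uint8_t addr[6])
{
	uint16_t total_one = 0;
	int j;

	for (j = 0; j < 6; j++)
		total_one = crc16_step(total_one, addr[j]);
	return total ^ total_one;
}

int main(void)
{
	const uint8_t a[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint8_t b[6] = { 0x02, 0xaa, 0xbb, 0xcc, 0xdd, 0xee };

	/* same set of clients -> same checksum, regardless of order */
	printf("%04x %04x\n",
	       (unsigned int)tt_crc_fold(tt_crc_fold(0, a), b),
	       (unsigned int)tt_crc_fold(tt_crc_fold(0, b), a));
	return 0;
}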
1192
1193static void tt_req_list_free(struct bat_priv *bat_priv)
1194{
1195 struct tt_req_node *node, *safe;
1196
1197 spin_lock_bh(&bat_priv->tt_req_list_lock);
1198
1199 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1200 list_del(&node->list);
1201 kfree(node);
1202 }
1203
1204 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1205}
1206
Sven Eckelmannde7aae62012-02-05 18:55:22 +01001207static void tt_save_orig_buffer(struct bat_priv *bat_priv,
1208 struct orig_node *orig_node,
1209 const unsigned char *tt_buff,
1210 uint8_t tt_num_changes)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001211{
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001212 uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001213
1214 /* Replace the old buffer only if I received something in the
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001215 * last OGM (the OGM could carry no changes)
1216 */
Antonio Quartullia73105b2011-04-27 14:27:44 +02001217 spin_lock_bh(&orig_node->tt_buff_lock);
1218 if (tt_buff_len > 0) {
1219 kfree(orig_node->tt_buff);
1220 orig_node->tt_buff_len = 0;
1221 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
1222 if (orig_node->tt_buff) {
1223 memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
1224 orig_node->tt_buff_len = tt_buff_len;
1225 }
1226 }
1227 spin_unlock_bh(&orig_node->tt_buff_lock);
1228}
1229
1230static void tt_req_purge(struct bat_priv *bat_priv)
1231{
1232 struct tt_req_node *node, *safe;
1233
1234 spin_lock_bh(&bat_priv->tt_req_list_lock);
1235 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
Marek Lindner032b7962011-12-20 19:30:40 +08001236 if (has_timed_out(node->issued_at, TT_REQUEST_TIMEOUT)) {
Antonio Quartullia73105b2011-04-27 14:27:44 +02001237 list_del(&node->list);
1238 kfree(node);
1239 }
1240 }
1241 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1242}
1243
1244/* returns the pointer to the new tt_req_node struct if no request
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001245 * has already been issued for this orig_node, NULL otherwise
1246 */
Antonio Quartullia73105b2011-04-27 14:27:44 +02001247static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
1248 struct orig_node *orig_node)
1249{
1250 struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
1251
1252 spin_lock_bh(&bat_priv->tt_req_list_lock);
1253 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
1254	if (compare_eth(tt_req_node_tmp->addr, orig_node->orig) &&
Martin Hundebølla04ccd52011-12-08 13:32:41 +01001255 !has_timed_out(tt_req_node_tmp->issued_at,
Marek Lindner032b7962011-12-20 19:30:40 +08001256 TT_REQUEST_TIMEOUT))
Antonio Quartullia73105b2011-04-27 14:27:44 +02001257 goto unlock;
1258 }
1259
1260 tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
1261 if (!tt_req_node)
1262 goto unlock;
1263
1264 memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
1265 tt_req_node->issued_at = jiffies;
1266
1267 list_add(&tt_req_node->list, &bat_priv->tt_req_list);
1268unlock:
1269 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1270 return tt_req_node;
1271}
1272
Antonio Quartulli058d0e22011-07-07 01:40:58 +02001273/* data_ptr is useless here, but has to be kept to respect the prototype */
1274static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
1275{
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001276 const struct tt_common_entry *tt_common_entry = entry_ptr;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02001277
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001278 if (tt_common_entry->flags & TT_CLIENT_NEW)
Antonio Quartulli058d0e22011-07-07 01:40:58 +02001279 return 0;
1280 return 1;
1281}
1282
Antonio Quartullia73105b2011-04-27 14:27:44 +02001283static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
1284{
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001285 const struct tt_common_entry *tt_common_entry = entry_ptr;
1286 const struct tt_global_entry *tt_global_entry;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001287 const struct orig_node *orig_node = data_ptr;
1288
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001289 if (tt_common_entry->flags & TT_CLIENT_ROAM)
Antonio Quartullicc47f662011-04-27 14:27:57 +02001290 return 0;
1291
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001292 tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
1293 common);
1294
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +02001295 return tt_global_entry_has_orig(tt_global_entry, orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001296}
1297
1298static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1299 struct hashtable_t *hash,
1300 struct hard_iface *primary_if,
1301 int (*valid_cb)(const void *,
1302 const void *),
1303 void *cb_data)
1304{
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001305 struct tt_common_entry *tt_common_entry;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001306 struct tt_query_packet *tt_response;
1307 struct tt_change *tt_change;
1308 struct hlist_node *node;
1309 struct hlist_head *head;
1310 struct sk_buff *skb = NULL;
1311 uint16_t tt_tot, tt_count;
1312 ssize_t tt_query_size = sizeof(struct tt_query_packet);
Antonio Quartullic90681b2011-10-05 17:05:25 +02001313 uint32_t i;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001314
1315 if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
1316 tt_len = primary_if->soft_iface->mtu - tt_query_size;
1317 tt_len -= tt_len % sizeof(struct tt_change);
1318 }
1319 tt_tot = tt_len / sizeof(struct tt_change);
1320
1321 skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
1322 if (!skb)
1323 goto out;
1324
1325 skb_reserve(skb, ETH_HLEN);
1326 tt_response = (struct tt_query_packet *)skb_put(skb,
1327 tt_query_size + tt_len);
1328 tt_response->ttvn = ttvn;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001329
1330 tt_change = (struct tt_change *)(skb->data + tt_query_size);
1331 tt_count = 0;
1332
1333 rcu_read_lock();
1334 for (i = 0; i < hash->size; i++) {
1335 head = &hash->table[i];
1336
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001337 hlist_for_each_entry_rcu(tt_common_entry, node,
Antonio Quartullia73105b2011-04-27 14:27:44 +02001338 head, hash_entry) {
1339 if (tt_count == tt_tot)
1340 break;
1341
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001342 if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
Antonio Quartullia73105b2011-04-27 14:27:44 +02001343 continue;
1344
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001345 memcpy(tt_change->addr, tt_common_entry->addr,
1346 ETH_ALEN);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001347 tt_change->flags = NO_FLAGS;
1348
1349 tt_count++;
1350 tt_change++;
1351 }
1352 }
1353 rcu_read_unlock();
1354
Antonio Quartulli9d852392011-10-17 14:25:13 +02001355 /* store in the message the number of entries we have successfully
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001356 * copied
1357 */
Antonio Quartulli9d852392011-10-17 14:25:13 +02001358 tt_response->tt_data = htons(tt_count);
1359
Antonio Quartullia73105b2011-04-27 14:27:44 +02001360out:
1361 return skb;
1362}
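/* Editorial sketch, not part of this file: tt_response_fill_table() above
 * must fit the response into a single frame, so it clamps the TT payload to
 * the interface MTU and rounds it down to a whole number of tt_change
 * records. That arithmetic is isolated below; the header and record sizes
 * are illustrative assumptions, not the values from packet.h.
 */
#include <stddef.h>
#include <stdio.h>

int main(void)
{
	const size_t tt_query_size = 18;	/* assumed query header size */
	const size_t tt_change_size = 7;	/* assumed per-record size */
	size_t mtu = 1500;
	size_t tt_len = 4200;			/* requested payload length */

	if (tt_query_size + tt_len > mtu) {
		tt_len = mtu - tt_query_size;
		tt_len -= tt_len % tt_change_size;	/* whole records only */
	}
	printf("payload: %zu bytes, %zu records\n",
	       tt_len, tt_len / tt_change_size);
	return 0;
}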
1363
Marek Lindnera943cac2011-07-30 13:10:18 +02001364static int send_tt_request(struct bat_priv *bat_priv,
1365 struct orig_node *dst_orig_node,
1366 uint8_t ttvn, uint16_t tt_crc, bool full_table)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001367{
1368 struct sk_buff *skb = NULL;
1369 struct tt_query_packet *tt_request;
1370 struct neigh_node *neigh_node = NULL;
1371 struct hard_iface *primary_if;
1372 struct tt_req_node *tt_req_node = NULL;
1373 int ret = 1;
1374
Sven Eckelmanne5d89252012-05-12 13:48:54 +02001375 primary_if = batadv_primary_if_get_selected(bat_priv);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001376 if (!primary_if)
1377 goto out;
1378
1379 /* The new tt_req will be issued only if I'm not waiting for a
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001380 * reply from the same orig_node yet
1381 */
Antonio Quartullia73105b2011-04-27 14:27:44 +02001382 tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
1383 if (!tt_req_node)
1384 goto out;
1385
1386 skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
1387 if (!skb)
1388 goto out;
1389
1390 skb_reserve(skb, ETH_HLEN);
1391
1392 tt_request = (struct tt_query_packet *)skb_put(skb,
1393 sizeof(struct tt_query_packet));
1394
Sven Eckelmann76543d12011-11-20 15:47:38 +01001395 tt_request->header.packet_type = BAT_TT_QUERY;
1396 tt_request->header.version = COMPAT_VERSION;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001397 memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1398 memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
Sven Eckelmann76543d12011-11-20 15:47:38 +01001399 tt_request->header.ttl = TTL;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001400 tt_request->ttvn = ttvn;
Antonio Quartulli6d2003f2012-04-14 13:15:27 +02001401 tt_request->tt_data = htons(tt_crc);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001402 tt_request->flags = TT_REQUEST;
1403
1404 if (full_table)
1405 tt_request->flags |= TT_FULL_TABLE;
1406
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001407 neigh_node = batadv_orig_node_get_router(dst_orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001408 if (!neigh_node)
1409 goto out;
1410
Sven Eckelmann86ceb362012-03-07 09:07:45 +01001411 bat_dbg(DBG_TT, bat_priv,
1412 "Sending TT_REQUEST to %pM via %pM [%c]\n",
1413 dst_orig_node->orig, neigh_node->addr,
Antonio Quartullia73105b2011-04-27 14:27:44 +02001414 (full_table ? 'F' : '.'));
1415
Martin Hundebøllf8214862012-04-20 17:02:45 +02001416 batadv_inc_counter(bat_priv, BAT_CNT_TT_REQUEST_TX);
1417
Sven Eckelmann9455e342012-05-12 02:09:37 +02001418 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001419 ret = 0;
1420
1421out:
1422 if (neigh_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001423 batadv_neigh_node_free_ref(neigh_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001424 if (primary_if)
Sven Eckelmanne5d89252012-05-12 13:48:54 +02001425 batadv_hardif_free_ref(primary_if);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001426 if (ret)
1427 kfree_skb(skb);
1428 if (ret && tt_req_node) {
1429 spin_lock_bh(&bat_priv->tt_req_list_lock);
1430 list_del(&tt_req_node->list);
1431 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1432 kfree(tt_req_node);
1433 }
1434 return ret;
1435}
1436
1437static bool send_other_tt_response(struct bat_priv *bat_priv,
1438 struct tt_query_packet *tt_request)
1439{
1440 struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
1441 struct neigh_node *neigh_node = NULL;
1442 struct hard_iface *primary_if = NULL;
1443 uint8_t orig_ttvn, req_ttvn, ttvn;
1444 int ret = false;
1445 unsigned char *tt_buff;
1446 bool full_table;
1447 uint16_t tt_len, tt_tot;
1448 struct sk_buff *skb = NULL;
1449 struct tt_query_packet *tt_response;
1450
1451 bat_dbg(DBG_TT, bat_priv,
Sven Eckelmann86ceb362012-03-07 09:07:45 +01001452 "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
1453 tt_request->src, tt_request->ttvn, tt_request->dst,
Antonio Quartullia73105b2011-04-27 14:27:44 +02001454 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
1455
1456 /* Let's get the orig node of the REAL destination */
Antonio Quartullieb7e2a12011-10-12 14:54:50 +02001457 req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001458 if (!req_dst_orig_node)
1459 goto out;
1460
Antonio Quartullieb7e2a12011-10-12 14:54:50 +02001461 res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001462 if (!res_dst_orig_node)
1463 goto out;
1464
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001465 neigh_node = batadv_orig_node_get_router(res_dst_orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001466 if (!neigh_node)
1467 goto out;
1468
Sven Eckelmanne5d89252012-05-12 13:48:54 +02001469 primary_if = batadv_primary_if_get_selected(bat_priv);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001470 if (!primary_if)
1471 goto out;
1472
1473 orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1474 req_ttvn = tt_request->ttvn;
1475
Antonio Quartulli015758d2011-07-09 17:52:13 +02001476 /* I don't have the requested data */
Antonio Quartullia73105b2011-04-27 14:27:44 +02001477 if (orig_ttvn != req_ttvn ||
Al Virof25bd582012-04-22 07:44:27 +01001478 tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
Antonio Quartullia73105b2011-04-27 14:27:44 +02001479 goto out;
1480
Antonio Quartulli015758d2011-07-09 17:52:13 +02001481 /* If the full table has been explicitly requested */
Antonio Quartullia73105b2011-04-27 14:27:44 +02001482 if (tt_request->flags & TT_FULL_TABLE ||
1483 !req_dst_orig_node->tt_buff)
1484 full_table = true;
1485 else
1486 full_table = false;
1487
1488	/* In this version, fragmentation is not implemented, so
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001489	 * I'll send only one packet with as many TT entries as I can
1490 */
Antonio Quartullia73105b2011-04-27 14:27:44 +02001491 if (!full_table) {
1492 spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
1493 tt_len = req_dst_orig_node->tt_buff_len;
1494 tt_tot = tt_len / sizeof(struct tt_change);
1495
1496 skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
1497 tt_len + ETH_HLEN);
1498 if (!skb)
1499 goto unlock;
1500
1501 skb_reserve(skb, ETH_HLEN);
1502 tt_response = (struct tt_query_packet *)skb_put(skb,
1503 sizeof(struct tt_query_packet) + tt_len);
1504 tt_response->ttvn = req_ttvn;
1505 tt_response->tt_data = htons(tt_tot);
1506
1507 tt_buff = skb->data + sizeof(struct tt_query_packet);
1508 /* Copy the last orig_node's OGM buffer */
1509 memcpy(tt_buff, req_dst_orig_node->tt_buff,
1510 req_dst_orig_node->tt_buff_len);
1511
1512 spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1513 } else {
1514 tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
1515 sizeof(struct tt_change);
1516 ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1517
1518 skb = tt_response_fill_table(tt_len, ttvn,
1519 bat_priv->tt_global_hash,
1520 primary_if, tt_global_valid_entry,
1521 req_dst_orig_node);
1522 if (!skb)
1523 goto out;
1524
1525 tt_response = (struct tt_query_packet *)skb->data;
1526 }
1527
Sven Eckelmann76543d12011-11-20 15:47:38 +01001528 tt_response->header.packet_type = BAT_TT_QUERY;
1529 tt_response->header.version = COMPAT_VERSION;
1530 tt_response->header.ttl = TTL;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001531 memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
1532 memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1533 tt_response->flags = TT_RESPONSE;
1534
1535 if (full_table)
1536 tt_response->flags |= TT_FULL_TABLE;
1537
1538 bat_dbg(DBG_TT, bat_priv,
1539 "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
1540 res_dst_orig_node->orig, neigh_node->addr,
1541 req_dst_orig_node->orig, req_ttvn);
1542
Martin Hundebøllf8214862012-04-20 17:02:45 +02001543 batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX);
1544
Sven Eckelmann9455e342012-05-12 02:09:37 +02001545 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001546 ret = true;
1547 goto out;
1548
1549unlock:
1550 spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1551
1552out:
1553 if (res_dst_orig_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001554 batadv_orig_node_free_ref(res_dst_orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001555 if (req_dst_orig_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001556 batadv_orig_node_free_ref(req_dst_orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001557 if (neigh_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001558 batadv_neigh_node_free_ref(neigh_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001559 if (primary_if)
Sven Eckelmanne5d89252012-05-12 13:48:54 +02001560 batadv_hardif_free_ref(primary_if);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001561 if (!ret)
1562 kfree_skb(skb);
1563 return ret;
1564}
1565
1566static bool send_my_tt_response(struct bat_priv *bat_priv,
1567 struct tt_query_packet *tt_request)
1568{
1569 struct orig_node *orig_node = NULL;
1570 struct neigh_node *neigh_node = NULL;
1571 struct hard_iface *primary_if = NULL;
1572 uint8_t my_ttvn, req_ttvn, ttvn;
1573 int ret = false;
1574 unsigned char *tt_buff;
1575 bool full_table;
1576 uint16_t tt_len, tt_tot;
1577 struct sk_buff *skb = NULL;
1578 struct tt_query_packet *tt_response;
1579
1580 bat_dbg(DBG_TT, bat_priv,
Sven Eckelmann86ceb362012-03-07 09:07:45 +01001581 "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
1582 tt_request->src, tt_request->ttvn,
Antonio Quartullia73105b2011-04-27 14:27:44 +02001583 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
1584
1586 my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
1587 req_ttvn = tt_request->ttvn;
1588
Antonio Quartullieb7e2a12011-10-12 14:54:50 +02001589 orig_node = orig_hash_find(bat_priv, tt_request->src);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001590 if (!orig_node)
1591 goto out;
1592
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001593 neigh_node = batadv_orig_node_get_router(orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001594 if (!neigh_node)
1595 goto out;
1596
Sven Eckelmanne5d89252012-05-12 13:48:54 +02001597 primary_if = batadv_primary_if_get_selected(bat_priv);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001598 if (!primary_if)
1599 goto out;
1600
1601 /* If the full table has been explicitly requested or the gap
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001602 * is too big send the whole local translation table
1603 */
Antonio Quartullia73105b2011-04-27 14:27:44 +02001604 if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
1605 !bat_priv->tt_buff)
1606 full_table = true;
1607 else
1608 full_table = false;
1609
1610	/* In this version, fragmentation is not implemented, so
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001611	 * I'll send only one packet with as many TT entries as I can
1612 */
Antonio Quartullia73105b2011-04-27 14:27:44 +02001613 if (!full_table) {
1614 spin_lock_bh(&bat_priv->tt_buff_lock);
1615 tt_len = bat_priv->tt_buff_len;
1616 tt_tot = tt_len / sizeof(struct tt_change);
1617
1618 skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
1619 tt_len + ETH_HLEN);
1620 if (!skb)
1621 goto unlock;
1622
1623 skb_reserve(skb, ETH_HLEN);
1624 tt_response = (struct tt_query_packet *)skb_put(skb,
1625 sizeof(struct tt_query_packet) + tt_len);
1626 tt_response->ttvn = req_ttvn;
1627 tt_response->tt_data = htons(tt_tot);
1628
1629 tt_buff = skb->data + sizeof(struct tt_query_packet);
1630 memcpy(tt_buff, bat_priv->tt_buff,
1631 bat_priv->tt_buff_len);
1632 spin_unlock_bh(&bat_priv->tt_buff_lock);
1633 } else {
1634 tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
1635 sizeof(struct tt_change);
1636 ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
1637
1638 skb = tt_response_fill_table(tt_len, ttvn,
1639 bat_priv->tt_local_hash,
Antonio Quartulli058d0e22011-07-07 01:40:58 +02001640 primary_if, tt_local_valid_entry,
1641 NULL);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001642 if (!skb)
1643 goto out;
1644
1645 tt_response = (struct tt_query_packet *)skb->data;
1646 }
1647
Sven Eckelmann76543d12011-11-20 15:47:38 +01001648 tt_response->header.packet_type = BAT_TT_QUERY;
1649 tt_response->header.version = COMPAT_VERSION;
1650 tt_response->header.ttl = TTL;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001651 memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1652 memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1653 tt_response->flags = TT_RESPONSE;
1654
1655 if (full_table)
1656 tt_response->flags |= TT_FULL_TABLE;
1657
1658 bat_dbg(DBG_TT, bat_priv,
1659 "Sending TT_RESPONSE to %pM via %pM [%c]\n",
1660 orig_node->orig, neigh_node->addr,
1661 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
1662
Martin Hundebøllf8214862012-04-20 17:02:45 +02001663 batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX);
1664
Sven Eckelmann9455e342012-05-12 02:09:37 +02001665 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001666 ret = true;
1667 goto out;
1668
1669unlock:
1670 spin_unlock_bh(&bat_priv->tt_buff_lock);
1671out:
1672 if (orig_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001673 batadv_orig_node_free_ref(orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001674 if (neigh_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001675 batadv_neigh_node_free_ref(neigh_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001676 if (primary_if)
Sven Eckelmanne5d89252012-05-12 13:48:54 +02001677 batadv_hardif_free_ref(primary_if);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001678 if (!ret)
1679 kfree_skb(skb);
1680 /* This packet was for me, so it doesn't need to be re-routed */
1681 return true;
1682}
1683
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001684bool batadv_send_tt_response(struct bat_priv *bat_priv,
1685 struct tt_query_packet *tt_request)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001686{
Sven Eckelmann3193e8f2012-05-12 02:09:42 +02001687 if (batadv_is_my_mac(tt_request->dst)) {
Simon Wunderlich20ff9d52012-01-22 20:00:23 +01001688 /* don't answer backbone gws! */
Sven Eckelmann08adf152012-05-12 13:38:47 +02001689 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
Simon Wunderlich20ff9d52012-01-22 20:00:23 +01001690 return true;
1691
Antonio Quartullia73105b2011-04-27 14:27:44 +02001692 return send_my_tt_response(bat_priv, tt_request);
Simon Wunderlich20ff9d52012-01-22 20:00:23 +01001693 } else {
Antonio Quartullia73105b2011-04-27 14:27:44 +02001694 return send_other_tt_response(bat_priv, tt_request);
Simon Wunderlich20ff9d52012-01-22 20:00:23 +01001695 }
Antonio Quartullia73105b2011-04-27 14:27:44 +02001696}
1697
1698static void _tt_update_changes(struct bat_priv *bat_priv,
1699 struct orig_node *orig_node,
1700 struct tt_change *tt_change,
1701 uint16_t tt_num_changes, uint8_t ttvn)
1702{
1703 int i;
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001704 int is_wifi;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001705
1706 for (i = 0; i < tt_num_changes; i++) {
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001707 if ((tt_change + i)->flags & TT_CLIENT_DEL) {
Antonio Quartullia73105b2011-04-27 14:27:44 +02001708 tt_global_del(bat_priv, orig_node,
1709 (tt_change + i)->addr,
Antonio Quartullicc47f662011-04-27 14:27:57 +02001710 "tt removed by changes",
1711 (tt_change + i)->flags & TT_CLIENT_ROAM);
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001712 } else {
1713 is_wifi = (tt_change + i)->flags & TT_CLIENT_WIFI;
1714 if (!batadv_tt_global_add(bat_priv, orig_node,
1715 (tt_change + i)->addr, ttvn,
1716 false, is_wifi))
Antonio Quartullia73105b2011-04-27 14:27:44 +02001717				/* In case of a problem while storing a
1718 * global_entry, we stop the updating
1719 * procedure without committing the
1720				 * ttvn change. This will avoid sending
1721 * corrupted data on tt_request
1722 */
1723 return;
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001724 }
Antonio Quartullia73105b2011-04-27 14:27:44 +02001725 }
Antonio Quartulli17071572011-11-07 16:36:40 +01001726 orig_node->tt_initialised = true;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001727}
1728
1729static void tt_fill_gtable(struct bat_priv *bat_priv,
1730 struct tt_query_packet *tt_response)
1731{
1732 struct orig_node *orig_node = NULL;
1733
1734 orig_node = orig_hash_find(bat_priv, tt_response->src);
1735 if (!orig_node)
1736 goto out;
1737
1738 /* Purge the old table first.. */
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001739 batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
Antonio Quartullia73105b2011-04-27 14:27:44 +02001740
1741 _tt_update_changes(bat_priv, orig_node,
1742 (struct tt_change *)(tt_response + 1),
Al Virof25bd582012-04-22 07:44:27 +01001743 ntohs(tt_response->tt_data), tt_response->ttvn);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001744
1745 spin_lock_bh(&orig_node->tt_buff_lock);
1746 kfree(orig_node->tt_buff);
1747 orig_node->tt_buff_len = 0;
1748 orig_node->tt_buff = NULL;
1749 spin_unlock_bh(&orig_node->tt_buff_lock);
1750
1751 atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
1752
1753out:
1754 if (orig_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001755 batadv_orig_node_free_ref(orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001756}
1757
Marek Lindnera943cac2011-07-30 13:10:18 +02001758static void tt_update_changes(struct bat_priv *bat_priv,
1759 struct orig_node *orig_node,
1760 uint16_t tt_num_changes, uint8_t ttvn,
1761 struct tt_change *tt_change)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001762{
1763 _tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
1764 ttvn);
1765
1766 tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
1767 tt_num_changes);
1768 atomic_set(&orig_node->last_ttvn, ttvn);
1769}
1770
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001771bool batadv_is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001772{
Antonio Quartulli7683fdc2011-04-27 14:28:07 +02001773 struct tt_local_entry *tt_local_entry = NULL;
1774 bool ret = false;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001775
Antonio Quartullia73105b2011-04-27 14:27:44 +02001776 tt_local_entry = tt_local_hash_find(bat_priv, addr);
Antonio Quartulli7683fdc2011-04-27 14:28:07 +02001777 if (!tt_local_entry)
1778 goto out;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02001779 /* Check if the client has been logically deleted (but is kept for
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001780 * consistency purpose)
1781 */
Antonio Quartulli48100ba2011-10-30 12:17:33 +01001782 if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
Antonio Quartulli058d0e22011-07-07 01:40:58 +02001783 goto out;
Antonio Quartulli7683fdc2011-04-27 14:28:07 +02001784 ret = true;
1785out:
Antonio Quartullia73105b2011-04-27 14:27:44 +02001786 if (tt_local_entry)
Antonio Quartulli7683fdc2011-04-27 14:28:07 +02001787 tt_local_entry_free_ref(tt_local_entry);
1788 return ret;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001789}
1790
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001791void batadv_handle_tt_response(struct bat_priv *bat_priv,
1792 struct tt_query_packet *tt_response)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001793{
1794 struct tt_req_node *node, *safe;
1795 struct orig_node *orig_node = NULL;
1796
Sven Eckelmann86ceb362012-03-07 09:07:45 +01001797 bat_dbg(DBG_TT, bat_priv,
1798 "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
Al Virof25bd582012-04-22 07:44:27 +01001799 tt_response->src, tt_response->ttvn,
1800 ntohs(tt_response->tt_data),
Antonio Quartullia73105b2011-04-27 14:27:44 +02001801 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
1802
Simon Wunderlich20ff9d52012-01-22 20:00:23 +01001803 /* we should have never asked a backbone gw */
Sven Eckelmann08adf152012-05-12 13:38:47 +02001804 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
Simon Wunderlich20ff9d52012-01-22 20:00:23 +01001805 goto out;
1806
Antonio Quartullia73105b2011-04-27 14:27:44 +02001807 orig_node = orig_hash_find(bat_priv, tt_response->src);
1808 if (!orig_node)
1809 goto out;
1810
1811 if (tt_response->flags & TT_FULL_TABLE)
1812 tt_fill_gtable(bat_priv, tt_response);
1813 else
Al Virof25bd582012-04-22 07:44:27 +01001814 tt_update_changes(bat_priv, orig_node,
1815 ntohs(tt_response->tt_data),
Antonio Quartullia73105b2011-04-27 14:27:44 +02001816 tt_response->ttvn,
1817 (struct tt_change *)(tt_response + 1));
1818
1819 /* Delete the tt_req_node from pending tt_requests list */
1820 spin_lock_bh(&bat_priv->tt_req_list_lock);
1821 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1822 if (!compare_eth(node->addr, tt_response->src))
1823 continue;
1824 list_del(&node->list);
1825 kfree(node);
1826 }
1827 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1828
1829 /* Recalculate the CRC for this orig_node and store it */
Antonio Quartullia73105b2011-04-27 14:27:44 +02001830 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
Antonio Quartullicc47f662011-04-27 14:27:57 +02001831 /* Roaming phase is over: tables are in sync again. I can
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001832 * unset the flag
1833 */
Antonio Quartullicc47f662011-04-27 14:27:57 +02001834 orig_node->tt_poss_change = false;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001835out:
1836 if (orig_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001837 batadv_orig_node_free_ref(orig_node);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001838}
1839
Sven Eckelmann08c36d32012-05-12 02:09:39 +02001840int batadv_tt_init(struct bat_priv *bat_priv)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001841{
Sven Eckelmann5346c352012-05-05 13:27:28 +02001842 int ret;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001843
Sven Eckelmann5346c352012-05-05 13:27:28 +02001844 ret = tt_local_init(bat_priv);
1845 if (ret < 0)
1846 return ret;
1847
1848 ret = tt_global_init(bat_priv);
1849 if (ret < 0)
1850 return ret;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001851
1852 tt_start_timer(bat_priv);
1853
1854 return 1;
1855}
1856
Antonio Quartullicc47f662011-04-27 14:27:57 +02001857static void tt_roam_list_free(struct bat_priv *bat_priv)
Antonio Quartullia73105b2011-04-27 14:27:44 +02001858{
Antonio Quartullicc47f662011-04-27 14:27:57 +02001859 struct tt_roam_node *node, *safe;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001860
Antonio Quartullicc47f662011-04-27 14:27:57 +02001861 spin_lock_bh(&bat_priv->tt_roam_list_lock);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001862
Antonio Quartullicc47f662011-04-27 14:27:57 +02001863 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1864 list_del(&node->list);
1865 kfree(node);
1866 }
1867
1868 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1869}
1870
1871static void tt_roam_purge(struct bat_priv *bat_priv)
1872{
1873 struct tt_roam_node *node, *safe;
1874
1875 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1876 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
Marek Lindner032b7962011-12-20 19:30:40 +08001877 if (!has_timed_out(node->first_time, ROAMING_MAX_TIME))
Antonio Quartullicc47f662011-04-27 14:27:57 +02001878 continue;
1879
1880 list_del(&node->list);
1881 kfree(node);
1882 }
1883 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1884}
1885
1886/* This function checks whether the client already reached the
1887 * maximum number of possible roaming phases. In this case the ROAMING_ADV
1888 * will not be sent.
1889 *
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001890 * returns true if the ROAMING_ADV can be sent, false otherwise
1891 */
Antonio Quartullicc47f662011-04-27 14:27:57 +02001892static bool tt_check_roam_count(struct bat_priv *bat_priv,
1893 uint8_t *client)
1894{
1895 struct tt_roam_node *tt_roam_node;
1896 bool ret = false;
1897
1898 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1899	/* check whether the client has already roamed recently: if so,
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001900	 * consume one unit of its roaming budget instead of adding an entry
1901 */
Antonio Quartullicc47f662011-04-27 14:27:57 +02001902 list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
1903 if (!compare_eth(tt_roam_node->addr, client))
1904 continue;
1905
Marek Lindner032b7962011-12-20 19:30:40 +08001906 if (has_timed_out(tt_roam_node->first_time, ROAMING_MAX_TIME))
Antonio Quartullicc47f662011-04-27 14:27:57 +02001907 continue;
1908
1909 if (!atomic_dec_not_zero(&tt_roam_node->counter))
1910 /* Sorry, you roamed too many times! */
1911 goto unlock;
1912 ret = true;
1913 break;
1914 }
1915
1916 if (!ret) {
1917 tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
1918 if (!tt_roam_node)
1919 goto unlock;
1920
1921 tt_roam_node->first_time = jiffies;
1922 atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
1923 memcpy(tt_roam_node->addr, client, ETH_ALEN);
1924
1925 list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
1926 ret = true;
1927 }
1928
1929unlock:
1930 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1931 return ret;
1932}
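/* Editorial sketch, not part of this file: tt_check_roam_count() above
 * grants a client only a limited number of roaming advertisements within a
 * time window and starts a fresh window once the old one expires. A
 * standalone version of that budget check follows; MAX_EVENTS and
 * WINDOW_SEC are assumed values, not the kernel's ROAMING_MAX_COUNT and
 * ROAMING_MAX_TIME.
 */
#include <stdbool.h>
#include <time.h>

#define MAX_EVENTS 5
#define WINDOW_SEC 20

struct roam_budget {
	time_t first_time;	/* start of the current window */
	int counter;		/* events still allowed in this window */
};

static bool roam_allowed(struct roam_budget *b, time_t now)
{
	if (now - b->first_time > WINDOW_SEC) {
		/* old window expired: start a new one and allow this event */
		b->first_time = now;
		b->counter = MAX_EVENTS - 1;
		return true;
	}
	if (b->counter > 0) {
		b->counter--;
		return true;
	}
	return false;	/* the client roamed too often in this window */
}

int main(void)
{
	struct roam_budget b = { .first_time = time(NULL), .counter = MAX_EVENTS };

	return roam_allowed(&b, time(NULL)) ? 0 : 1;
}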
1933
Sven Eckelmannde7aae62012-02-05 18:55:22 +01001934static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
1935 struct orig_node *orig_node)
Antonio Quartullicc47f662011-04-27 14:27:57 +02001936{
1937 struct neigh_node *neigh_node = NULL;
1938 struct sk_buff *skb = NULL;
1939 struct roam_adv_packet *roam_adv_packet;
1940 int ret = 1;
1941 struct hard_iface *primary_if;
1942
1943 /* before going on we have to check whether the client has
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001944 * already roamed to us too many times
1945 */
Antonio Quartullicc47f662011-04-27 14:27:57 +02001946 if (!tt_check_roam_count(bat_priv, client))
1947 goto out;
1948
1949 skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
1950 if (!skb)
1951 goto out;
1952
1953 skb_reserve(skb, ETH_HLEN);
1954
1955 roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
1956 sizeof(struct roam_adv_packet));
1957
Sven Eckelmann76543d12011-11-20 15:47:38 +01001958 roam_adv_packet->header.packet_type = BAT_ROAM_ADV;
1959 roam_adv_packet->header.version = COMPAT_VERSION;
1960 roam_adv_packet->header.ttl = TTL;
Sven Eckelmanne5d89252012-05-12 13:48:54 +02001961 primary_if = batadv_primary_if_get_selected(bat_priv);
Antonio Quartullicc47f662011-04-27 14:27:57 +02001962 if (!primary_if)
1963 goto out;
1964 memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
Sven Eckelmanne5d89252012-05-12 13:48:54 +02001965 batadv_hardif_free_ref(primary_if);
Antonio Quartullicc47f662011-04-27 14:27:57 +02001966 memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
1967 memcpy(roam_adv_packet->client, client, ETH_ALEN);
1968
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001969 neigh_node = batadv_orig_node_get_router(orig_node);
Antonio Quartullicc47f662011-04-27 14:27:57 +02001970 if (!neigh_node)
1971 goto out;
1972
1973 bat_dbg(DBG_TT, bat_priv,
1974 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
1975 orig_node->orig, client, neigh_node->addr);
1976
Martin Hundebøllf8214862012-04-20 17:02:45 +02001977 batadv_inc_counter(bat_priv, BAT_CNT_TT_ROAM_ADV_TX);
1978
Sven Eckelmann9455e342012-05-12 02:09:37 +02001979 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
Antonio Quartullicc47f662011-04-27 14:27:57 +02001980 ret = 0;
1981
1982out:
1983 if (neigh_node)
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001984 batadv_neigh_node_free_ref(neigh_node);
Antonio Quartullicc47f662011-04-27 14:27:57 +02001985 if (ret)
1986 kfree_skb(skb);
1987 return;
Antonio Quartullia73105b2011-04-27 14:27:44 +02001988}
1989
1990static void tt_purge(struct work_struct *work)
1991{
1992 struct delayed_work *delayed_work =
1993 container_of(work, struct delayed_work, work);
1994 struct bat_priv *bat_priv =
1995 container_of(delayed_work, struct bat_priv, tt_work);
1996
1997 tt_local_purge(bat_priv);
Antonio Quartullicc47f662011-04-27 14:27:57 +02001998 tt_global_roam_purge(bat_priv);
Antonio Quartullia73105b2011-04-27 14:27:44 +02001999 tt_req_purge(bat_priv);
Antonio Quartullicc47f662011-04-27 14:27:57 +02002000 tt_roam_purge(bat_priv);
Antonio Quartullia73105b2011-04-27 14:27:44 +02002001
2002 tt_start_timer(bat_priv);
2003}
Antonio Quartullicc47f662011-04-27 14:27:57 +02002004
Sven Eckelmann08c36d32012-05-12 02:09:39 +02002005void batadv_tt_free(struct bat_priv *bat_priv)
Antonio Quartullicc47f662011-04-27 14:27:57 +02002006{
2007 cancel_delayed_work_sync(&bat_priv->tt_work);
2008
2009 tt_local_table_free(bat_priv);
2010 tt_global_table_free(bat_priv);
2011 tt_req_list_free(bat_priv);
2012 tt_changes_list_free(bat_priv);
2013 tt_roam_list_free(bat_priv);
2014
2015 kfree(bat_priv->tt_buff);
2016}
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002017
Antonio Quartulli697f2532011-11-07 16:47:01 +01002018/* This function enables or disables the specified flags for all the entries
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02002019 * in the given hash table and returns the number of modified entries
2020 */
Antonio Quartulli697f2532011-11-07 16:47:01 +01002021static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
2022 bool enable)
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002023{
Antonio Quartullic90681b2011-10-05 17:05:25 +02002024 uint32_t i;
Antonio Quartulli697f2532011-11-07 16:47:01 +01002025 uint16_t changed_num = 0;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002026 struct hlist_head *head;
2027 struct hlist_node *node;
Antonio Quartulli48100ba2011-10-30 12:17:33 +01002028 struct tt_common_entry *tt_common_entry;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002029
2030 if (!hash)
Antonio Quartulli697f2532011-11-07 16:47:01 +01002031 goto out;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002032
2033 for (i = 0; i < hash->size; i++) {
2034 head = &hash->table[i];
2035
2036 rcu_read_lock();
Antonio Quartulli48100ba2011-10-30 12:17:33 +01002037 hlist_for_each_entry_rcu(tt_common_entry, node,
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002038 head, hash_entry) {
Antonio Quartulli697f2532011-11-07 16:47:01 +01002039 if (enable) {
2040 if ((tt_common_entry->flags & flags) == flags)
2041 continue;
2042 tt_common_entry->flags |= flags;
2043 } else {
2044 if (!(tt_common_entry->flags & flags))
2045 continue;
2046 tt_common_entry->flags &= ~flags;
2047 }
2048 changed_num++;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002049 }
2050 rcu_read_unlock();
2051 }
Antonio Quartulli697f2532011-11-07 16:47:01 +01002052out:
2053 return changed_num;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002054}
2055
2056/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
2057static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
2058{
2059 struct hashtable_t *hash = bat_priv->tt_local_hash;
Antonio Quartulli48100ba2011-10-30 12:17:33 +01002060 struct tt_common_entry *tt_common_entry;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002061 struct tt_local_entry *tt_local_entry;
2062 struct hlist_node *node, *node_tmp;
2063 struct hlist_head *head;
2064 spinlock_t *list_lock; /* protects write access to the hash lists */
Antonio Quartullic90681b2011-10-05 17:05:25 +02002065 uint32_t i;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002066
2067 if (!hash)
2068 return;
2069
2070 for (i = 0; i < hash->size; i++) {
2071 head = &hash->table[i];
2072 list_lock = &hash->list_locks[i];
2073
2074 spin_lock_bh(list_lock);
Antonio Quartulli48100ba2011-10-30 12:17:33 +01002075 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002076 head, hash_entry) {
Antonio Quartulli48100ba2011-10-30 12:17:33 +01002077 if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002078 continue;
2079
Sven Eckelmann86ceb362012-03-07 09:07:45 +01002080 bat_dbg(DBG_TT, bat_priv,
2081 "Deleting local tt entry (%pM): pending\n",
2082 tt_common_entry->addr);
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002083
2084 atomic_dec(&bat_priv->num_local_tt);
2085 hlist_del_rcu(node);
Antonio Quartulli48100ba2011-10-30 12:17:33 +01002086 tt_local_entry = container_of(tt_common_entry,
2087 struct tt_local_entry,
2088 common);
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002089 tt_local_entry_free_ref(tt_local_entry);
2090 }
2091 spin_unlock_bh(list_lock);
2092 }
2093}
2095
Marek Lindnerbe9aa4c2012-05-07 04:22:05 +08002096static int tt_commit_changes(struct bat_priv *bat_priv,
2097 unsigned char **packet_buff, int *packet_buff_len,
2098 int packet_min_len)
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002099{
Marek Lindnerbe9aa4c2012-05-07 04:22:05 +08002100 uint16_t changed_num = 0;
2101
2102 if (atomic_read(&bat_priv->tt_local_changes) < 1)
2103 return -ENOENT;
2104
2105 changed_num = tt_set_flags(bat_priv->tt_local_hash,
2106 TT_CLIENT_NEW, false);
2107
2108 /* all reset entries have to be counted as local entries */
Antonio Quartulli697f2532011-11-07 16:47:01 +01002109 atomic_add(changed_num, &bat_priv->num_local_tt);
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002110 tt_local_purge_pending_clients(bat_priv);
Marek Lindnerbe9aa4c2012-05-07 04:22:05 +08002111 bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002112
2113 /* Increment the TTVN only once per OGM interval */
2114 atomic_inc(&bat_priv->ttvn);
Simon Wunderlichdb08e6e2011-10-22 20:12:51 +02002115 bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n",
2116 (uint8_t)atomic_read(&bat_priv->ttvn));
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002117 bat_priv->tt_poss_change = false;
Marek Lindnerbe9aa4c2012-05-07 04:22:05 +08002118
2119 /* reset the sending counter */
2120 atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
2121
2122 return tt_changes_fill_buff(bat_priv, packet_buff,
2123 packet_buff_len, packet_min_len);
2124}
2125
2126/* when calling this function (hard_iface == primary_if) has to be true */
2127int batadv_tt_append_diff(struct bat_priv *bat_priv,
2128 unsigned char **packet_buff, int *packet_buff_len,
2129 int packet_min_len)
2130{
2131 int tt_num_changes;
2132
2133 /* if at least one change happened */
2134 tt_num_changes = tt_commit_changes(bat_priv, packet_buff,
2135 packet_buff_len, packet_min_len);
2136
2137 /* if the changes have been sent often enough */
2138 if ((tt_num_changes < 0) &&
2139 (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
2140 tt_realloc_packet_buff(packet_buff, packet_buff_len,
2141 packet_min_len, packet_min_len);
2142 tt_num_changes = 0;
2143 }
2144
2145 return tt_num_changes;
Antonio Quartulli058d0e22011-07-07 01:40:58 +02002146}
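/* Editorial sketch, not part of this file: batadv_tt_append_diff() above
 * keeps attaching the last committed diff to outgoing OGMs until it has
 * been sent a limited number of times; after that the OGM carries no
 * changes and neighbours that fell behind use TT requests instead. The
 * counter handling is condensed below with an assumed APPEND_MAX (the
 * kernel uses TT_OGM_APPEND_MAX).
 */
#include <stdbool.h>

#define APPEND_MAX 3

struct tt_diff_state {
	int append_cnt;		/* how many more OGMs may carry the diff */
	bool have_changes;	/* a committed diff is pending */
};

/* called when a new batch of local changes is committed */
static void tt_commit(struct tt_diff_state *s)
{
	s->have_changes = true;
	s->append_cnt = APPEND_MAX;	/* reset the sending counter */
}

/* called once per OGM: decide whether to attach the pending diff */
static bool tt_attach_diff(struct tt_diff_state *s)
{
	if (!s->have_changes || s->append_cnt == 0)
		return false;
	s->append_cnt--;
	return true;
}

int main(void)
{
	struct tt_diff_state s = { 0, false };
	int sent = 0;

	tt_commit(&s);
	while (tt_attach_diff(&s))
		sent++;		/* the diff rides on APPEND_MAX OGMs */
	return sent == APPEND_MAX ? 0 : 1;
}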
Antonio Quartulli59b699c2011-07-07 15:35:36 +02002147
Sven Eckelmann08c36d32012-05-12 02:09:39 +02002148bool batadv_is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src,
2149 uint8_t *dst)
Antonio Quartulli59b699c2011-07-07 15:35:36 +02002150{
2151 struct tt_local_entry *tt_local_entry = NULL;
2152 struct tt_global_entry *tt_global_entry = NULL;
2153 bool ret = true;
2154
2155 if (!atomic_read(&bat_priv->ap_isolation))
2156 return false;
2157
2158 tt_local_entry = tt_local_hash_find(bat_priv, dst);
2159 if (!tt_local_entry)
2160 goto out;
2161
2162 tt_global_entry = tt_global_hash_find(bat_priv, src);
2163 if (!tt_global_entry)
2164 goto out;
2165
2166 if (_is_ap_isolated(tt_local_entry, tt_global_entry))
2167 goto out;
2168
2169 ret = false;
2170
2171out:
2172 if (tt_global_entry)
2173 tt_global_entry_free_ref(tt_global_entry);
2174 if (tt_local_entry)
2175 tt_local_entry_free_ref(tt_local_entry);
2176 return ret;
2177}
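/* Editorial sketch, not part of this file: batadv_is_ap_isolated() above,
 * through the _is_ap_isolated() helper, blocks traffic between two clients
 * only when both of them were learned over a wifi interface. A minimal
 * version of that flag test follows; the TT_CLIENT_WIFI bit value is an
 * assumption for illustration, not the definition from packet.h.
 */
#include <stdbool.h>
#include <stdint.h>

#define WIFI_FLAG (1u << 4)	/* assumed TT_CLIENT_WIFI bit */

static bool ap_isolated(uint16_t local_flags, uint16_t global_flags)
{
	/* isolate only wifi-to-wifi traffic (when ap_isolation is enabled) */
	return (local_flags & WIFI_FLAG) && (global_flags & WIFI_FLAG);
}

int main(void)
{
	/* wired-to-wifi traffic is never isolated in this scheme */
	return ap_isolated(0, WIFI_FLAG) ? 1 : 0;
}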
Marek Lindnera943cac2011-07-30 13:10:18 +02002178
Sven Eckelmann08c36d32012-05-12 02:09:39 +02002179void batadv_tt_update_orig(struct bat_priv *bat_priv,
2180 struct orig_node *orig_node,
2181 const unsigned char *tt_buff, uint8_t tt_num_changes,
2182 uint8_t ttvn, uint16_t tt_crc)
Marek Lindnera943cac2011-07-30 13:10:18 +02002183{
2184 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
2185 bool full_table = true;
2186
Simon Wunderlich20ff9d52012-01-22 20:00:23 +01002187	/* don't care about a backbone gateway's updates. */
Sven Eckelmann08adf152012-05-12 13:38:47 +02002188 if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
Simon Wunderlich20ff9d52012-01-22 20:00:23 +01002189 return;
2190
Antonio Quartulli17071572011-11-07 16:36:40 +01002191 /* orig table not initialised AND first diff is in the OGM OR the ttvn
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02002192 * increased by one -> we can apply the attached changes
2193 */
Antonio Quartulli17071572011-11-07 16:36:40 +01002194 if ((!orig_node->tt_initialised && ttvn == 1) ||
2195 ttvn - orig_ttvn == 1) {
Marek Lindnera943cac2011-07-30 13:10:18 +02002196 /* the OGM could not contain the changes due to their size or
2197 * because they have already been sent TT_OGM_APPEND_MAX times.
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02002198 * In this case send a tt request
2199 */
Marek Lindnera943cac2011-07-30 13:10:18 +02002200 if (!tt_num_changes) {
2201 full_table = false;
2202 goto request_table;
2203 }
2204
2205 tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
2206 (struct tt_change *)tt_buff);
2207
2208 /* Even if we received the precomputed crc with the OGM, we
2209 * prefer to recompute it to spot any possible inconsistency
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02002210 * in the global table
2211 */
Marek Lindnera943cac2011-07-30 13:10:18 +02002212 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
2213
2214 /* The ttvn alone is not enough to guarantee consistency
2215 * because a single value could represent different states
2216 * (due to the wrap around). Thus a node has to check whether
2217 * the resulting table (after applying the changes) is still
2218 * consistent or not. E.g. a node could disconnect while its
2219 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
2220 * checking the CRC value is mandatory to detect the
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02002221 * inconsistency
2222 */
Marek Lindnera943cac2011-07-30 13:10:18 +02002223 if (orig_node->tt_crc != tt_crc)
2224 goto request_table;
2225
2226 /* Roaming phase is over: tables are in sync again. I can
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02002227 * unset the flag
2228 */
Marek Lindnera943cac2011-07-30 13:10:18 +02002229 orig_node->tt_poss_change = false;
2230 } else {
2231 /* if we missed more than one change or our tables are not
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02002232 * in sync anymore -> request fresh tt data
2233 */
Antonio Quartulli17071572011-11-07 16:36:40 +01002234 if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
2235 orig_node->tt_crc != tt_crc) {
Marek Lindnera943cac2011-07-30 13:10:18 +02002236request_table:
Sven Eckelmann86ceb362012-03-07 09:07:45 +01002237 bat_dbg(DBG_TT, bat_priv,
2238 "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
2239 orig_node->orig, ttvn, orig_ttvn, tt_crc,
2240 orig_node->tt_crc, tt_num_changes);
Marek Lindnera943cac2011-07-30 13:10:18 +02002241 send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
2242 full_table);
2243 return;
2244 }
2245 }
2246}
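/* Editorial sketch, not part of this file: batadv_tt_update_orig() above
 * applies an attached diff only when it advances the local view by exactly
 * one ttvn (unsigned 8-bit arithmetic keeps the wrap-around harmless) and
 * the CRC of the resulting table matches the advertised one; otherwise it
 * falls back to a full-table request. The decision is condensed into one
 * helper below; note that the real code recomputes the CRC after applying
 * the changes rather than checking it up front.
 */
#include <stdbool.h>
#include <stdint.h>

enum tt_action { TT_APPLY_DIFF, TT_REQUEST_FULL_TABLE };

static enum tt_action tt_decide(bool initialised, uint8_t local_ttvn,
				uint8_t remote_ttvn, bool ogm_has_changes,
				uint16_t local_crc, uint16_t remote_crc)
{
	bool first_diff = !initialised && remote_ttvn == 1;
	bool next_diff = (uint8_t)(remote_ttvn - local_ttvn) == 1;

	if ((first_diff || next_diff) && ogm_has_changes &&
	    local_crc == remote_crc)
		return TT_APPLY_DIFF;

	/* missed changes, empty OGM or CRC mismatch: ask for the table */
	return TT_REQUEST_FULL_TABLE;
}

int main(void)
{
	/* ttvn wrapped from 255 to 0: still recognised as one step ahead */
	return tt_decide(true, 255, 0, true, 0x1234, 0x1234) == TT_APPLY_DIFF
	       ? 0 : 1;
}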
Antonio Quartulli3275e7c2012-03-16 18:03:28 +01002247
2248/* returns true if we know that the client has moved from its old
2249 * originator to another one. The entry is still kept for consistency
2250 * purposes
2251 */
Sven Eckelmann08c36d32012-05-12 02:09:39 +02002252bool batadv_tt_global_client_is_roaming(struct bat_priv *bat_priv,
2253 uint8_t *addr)
Antonio Quartulli3275e7c2012-03-16 18:03:28 +01002254{
2255 struct tt_global_entry *tt_global_entry;
2256 bool ret = false;
2257
2258 tt_global_entry = tt_global_hash_find(bat_priv, addr);
2259 if (!tt_global_entry)
2260 goto out;
2261
2262 ret = tt_global_entry->common.flags & TT_CLIENT_ROAM;
2263 tt_global_entry_free_ref(tt_global_entry);
2264out:
2265 return ret;
2266}