blob: f931830d630ec4fc0f38e8370135ad1be932a53b [file] [log] [blame]
/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
21
22#include "main.h"
23#include "translation-table.h"
24#include "soft-interface.h"
Marek Lindner32ae9b22011-04-20 15:40:58 +020025#include "hard-interface.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000026#include "hash.h"
27#include "originator.h"
28
29static void hna_local_purge(struct work_struct *work);
30static void _hna_global_del_orig(struct bat_priv *bat_priv,
31 struct hna_global_entry *hna_global_entry,
32 char *message);
33
Marek Lindner7aadf882011-02-18 12:28:09 +000034/* returns 1 if they are the same mac addr */
35static int compare_lhna(struct hlist_node *node, void *data2)
36{
37 void *data1 = container_of(node, struct hna_local_entry, hash_entry);
38
39 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
40}
41
42/* returns 1 if they are the same mac addr */
43static int compare_ghna(struct hlist_node *node, void *data2)
44{
45 void *data1 = container_of(node, struct hna_global_entry, hash_entry);
46
47 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
48}
49
/* (re-)arm the periodic local HNA purge: run hna_local_purge() on the
 * batman event workqueue after 10 seconds */
static void hna_local_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
	queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
}
55
/* look up the local HNA entry for the mac address in @data; returns the
 * entry or NULL if not found (or if the hash was never allocated).
 * NOTE(review): the entry is returned without a reference or lock held -
 * callers appear to hold hna_lhash_lock to keep it valid; confirm */
static struct hna_local_entry *hna_local_hash_find(struct bat_priv *bat_priv,
						   void *data)
{
	struct hashtable_t *hash = bat_priv->hna_local_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct hna_local_entry *hna_local_entry, *hna_local_entry_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	/* the mac address selects the bucket */
	index = choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(hna_local_entry, node, head, hash_entry) {
		/* compare_eth() matches the first ETH_ALEN bytes of the
		 * entry - assumes addr is the first struct member, the
		 * same layout compare_lhna() relies on */
		if (!compare_eth(hna_local_entry, data))
			continue;

		hna_local_entry_tmp = hna_local_entry;
		break;
	}
	rcu_read_unlock();

	return hna_local_entry_tmp;
}
83
/* look up the global HNA entry for the mac address in @data; returns
 * the entry or NULL if not found (or if the hash was never allocated).
 * NOTE(review): no reference/lock is taken - callers appear to hold
 * hna_ghash_lock to keep the entry valid; confirm */
static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv,
						     void *data)
{
	struct hashtable_t *hash = bat_priv->hna_global_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct hna_global_entry *hna_global_entry;
	struct hna_global_entry *hna_global_entry_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	/* the mac address selects the bucket */
	index = choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(hna_global_entry, node, head, hash_entry) {
		/* compare_eth() matches the first ETH_ALEN bytes of the
		 * entry - assumes addr is the first struct member (see
		 * compare_ghna) */
		if (!compare_eth(hna_global_entry, data))
			continue;

		hna_global_entry_tmp = hna_global_entry;
		break;
	}
	rcu_read_unlock();

	return hna_global_entry_tmp;
}
112
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000113int hna_local_init(struct bat_priv *bat_priv)
114{
115 if (bat_priv->hna_local_hash)
116 return 1;
117
118 bat_priv->hna_local_hash = hash_new(1024);
119
120 if (!bat_priv->hna_local_hash)
121 return 0;
122
123 atomic_set(&bat_priv->hna_local_changed, 0);
124 hna_local_start_timer(bat_priv);
125
126 return 1;
127}
128
129void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
130{
131 struct bat_priv *bat_priv = netdev_priv(soft_iface);
132 struct hna_local_entry *hna_local_entry;
133 struct hna_global_entry *hna_global_entry;
134 int required_bytes;
135
136 spin_lock_bh(&bat_priv->hna_lhash_lock);
Marek Lindner7aadf882011-02-18 12:28:09 +0000137 hna_local_entry = hna_local_hash_find(bat_priv, addr);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000138 spin_unlock_bh(&bat_priv->hna_lhash_lock);
139
140 if (hna_local_entry) {
141 hna_local_entry->last_seen = jiffies;
142 return;
143 }
144
145 /* only announce as many hosts as possible in the batman-packet and
146 space in batman_packet->num_hna That also should give a limit to
147 MAC-flooding. */
148 required_bytes = (bat_priv->num_local_hna + 1) * ETH_ALEN;
149 required_bytes += BAT_PACKET_LEN;
150
151 if ((required_bytes > ETH_DATA_LEN) ||
152 (atomic_read(&bat_priv->aggregated_ogms) &&
153 required_bytes > MAX_AGGREGATION_BYTES) ||
154 (bat_priv->num_local_hna + 1 > 255)) {
155 bat_dbg(DBG_ROUTES, bat_priv,
156 "Can't add new local hna entry (%pM): "
157 "number of local hna entries exceeds packet size\n",
158 addr);
159 return;
160 }
161
162 bat_dbg(DBG_ROUTES, bat_priv,
163 "Creating new local hna entry: %pM\n", addr);
164
165 hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
166 if (!hna_local_entry)
167 return;
168
169 memcpy(hna_local_entry->addr, addr, ETH_ALEN);
170 hna_local_entry->last_seen = jiffies;
171
172 /* the batman interface mac address should never be purged */
Marek Lindner39901e72011-02-18 12:28:08 +0000173 if (compare_eth(addr, soft_iface->dev_addr))
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000174 hna_local_entry->never_purge = 1;
175 else
176 hna_local_entry->never_purge = 0;
177
178 spin_lock_bh(&bat_priv->hna_lhash_lock);
179
Marek Lindner7aadf882011-02-18 12:28:09 +0000180 hash_add(bat_priv->hna_local_hash, compare_lhna, choose_orig,
181 hna_local_entry, &hna_local_entry->hash_entry);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000182 bat_priv->num_local_hna++;
183 atomic_set(&bat_priv->hna_local_changed, 1);
184
185 spin_unlock_bh(&bat_priv->hna_lhash_lock);
186
187 /* remove address from global hash if present */
188 spin_lock_bh(&bat_priv->hna_ghash_lock);
189
Marek Lindner7aadf882011-02-18 12:28:09 +0000190 hna_global_entry = hna_global_hash_find(bat_priv, addr);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000191
192 if (hna_global_entry)
193 _hna_global_del_orig(bat_priv, hna_global_entry,
194 "local hna received");
195
196 spin_unlock_bh(&bat_priv->hna_ghash_lock);
197}
198
/**
 * hna_local_fill_buffer - copy all local HNA mac addresses into @buff
 * @bat_priv: the bat priv with all the soft interface information
 * @buff: destination buffer for the packed ETH_ALEN sized addresses
 * @buff_len: size of @buff in bytes
 *
 * Returns the number of addresses written. When every local entry fit
 * into the buffer the local "changed" flag is cleared.
 */
int hna_local_fill_buffer(struct bat_priv *bat_priv,
			  unsigned char *buff, int buff_len)
{
	struct hashtable_t *hash = bat_priv->hna_local_hash;
	struct hna_local_entry *hna_local_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	int i, count = 0;

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(hna_local_entry, node,
					 head, hash_entry) {
			/* buffer full: only leaves the current bucket;
			 * the remaining buckets are still scanned but
			 * cannot add anything either */
			if (buff_len < (count + 1) * ETH_ALEN)
				break;

			memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr,
			       ETH_ALEN);

			count++;
		}
		rcu_read_unlock();
	}

	/* if we did not get all new local hnas see you next time ;-) */
	if (count == bat_priv->num_local_hna)
		atomic_set(&bat_priv->hna_local_changed, 0);

	spin_unlock_bh(&bat_priv->hna_lhash_lock);
	return count;
}
234
/* debugfs/seq_file output: list all locally announced HNA addresses */
int hna_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->hna_local_hash;
	struct hna_local_entry *hna_local_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	size_t buf_size, pos;
	char *buff;
	int i, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "Locally retrieved addresses (from %s) "
		   "announced via HNA:\n",
		   net_dev->name);

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		__hlist_for_each_rcu(node, head)
			buf_size += 21;
		rcu_read_unlock();
	}

	/* GFP_ATOMIC because a spinlock is held; holding the lock also
	 * keeps the entry count stable between estimate and copy */
	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->hna_lhash_lock);
		ret = -ENOMEM;
		goto out;
	}

	buff[0] = '\0';
	pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(hna_local_entry, node,
					 head, hash_entry) {
			/* 22 = strlen(" * xx:xx:xx:xx:xx:xx\n") + 1 */
			pos += snprintf(buff + pos, 22, " * %pM\n",
					hna_local_entry->addr);
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->hna_lhash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}
311
Marek Lindner7aadf882011-02-18 12:28:09 +0000312static void _hna_local_del(struct hlist_node *node, void *arg)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000313{
314 struct bat_priv *bat_priv = (struct bat_priv *)arg;
Marek Lindner7aadf882011-02-18 12:28:09 +0000315 void *data = container_of(node, struct hna_local_entry, hash_entry);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000316
317 kfree(data);
318 bat_priv->num_local_hna--;
319 atomic_set(&bat_priv->hna_local_changed, 1);
320}
321
/* unlink the given entry from the local HNA hash and free it.
 * NOTE(review): callers appear to hold hna_lhash_lock - confirm */
static void hna_local_del(struct bat_priv *bat_priv,
			  struct hna_local_entry *hna_local_entry,
			  char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
		hna_local_entry->addr, message);

	/* remove from the hash first, then free via _hna_local_del() */
	hash_remove(bat_priv->hna_local_hash, compare_lhna, choose_orig,
		    hna_local_entry->addr);
	_hna_local_del(&hna_local_entry->hash_entry, bat_priv);
}
333
334void hna_local_remove(struct bat_priv *bat_priv,
335 uint8_t *addr, char *message)
336{
337 struct hna_local_entry *hna_local_entry;
338
339 spin_lock_bh(&bat_priv->hna_lhash_lock);
340
Marek Lindner7aadf882011-02-18 12:28:09 +0000341 hna_local_entry = hna_local_hash_find(bat_priv, addr);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000342
343 if (hna_local_entry)
344 hna_local_del(bat_priv, hna_local_entry, message);
345
346 spin_unlock_bh(&bat_priv->hna_lhash_lock);
347}
348
/* periodic worker: drop local HNA entries that have not been seen for
 * LOCAL_HNA_TIMEOUT seconds, then re-arm the timer */
static void hna_local_purge(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, hna_work);
	struct hashtable_t *hash = bat_priv->hna_local_hash;
	struct hna_local_entry *hna_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	unsigned long timeout;
	int i;

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		/* _safe variant: hna_local_del() unlinks the current node */
		hlist_for_each_entry_safe(hna_local_entry, node, node_tmp,
					  head, hash_entry) {
			/* e.g. the soft interface's own mac address */
			if (hna_local_entry->never_purge)
				continue;

			timeout = hna_local_entry->last_seen;
			timeout += LOCAL_HNA_TIMEOUT * HZ;

			if (time_before(jiffies, timeout))
				continue;

			hna_local_del(bat_priv, hna_local_entry,
				      "address timed out");
		}
	}

	spin_unlock_bh(&bat_priv->hna_lhash_lock);
	hna_local_start_timer(bat_priv);
}
386
/* tear down the local HNA table: stop the purge worker first so it
 * cannot run against the hash being freed, then free all entries */
void hna_local_free(struct bat_priv *bat_priv)
{
	if (!bat_priv->hna_local_hash)
		return;

	cancel_delayed_work_sync(&bat_priv->hna_work);
	hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv);
	bat_priv->hna_local_hash = NULL;
}
396
397int hna_global_init(struct bat_priv *bat_priv)
398{
399 if (bat_priv->hna_global_hash)
400 return 1;
401
402 bat_priv->hna_global_hash = hash_new(1024);
403
404 if (!bat_priv->hna_global_hash)
405 return 0;
406
407 return 1;
408}
409
410void hna_global_add_orig(struct bat_priv *bat_priv,
411 struct orig_node *orig_node,
412 unsigned char *hna_buff, int hna_buff_len)
413{
414 struct hna_global_entry *hna_global_entry;
415 struct hna_local_entry *hna_local_entry;
416 int hna_buff_count = 0;
417 unsigned char *hna_ptr;
418
419 while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
420 spin_lock_bh(&bat_priv->hna_ghash_lock);
421
422 hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
Marek Lindner7aadf882011-02-18 12:28:09 +0000423 hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000424
425 if (!hna_global_entry) {
426 spin_unlock_bh(&bat_priv->hna_ghash_lock);
427
428 hna_global_entry =
429 kmalloc(sizeof(struct hna_global_entry),
430 GFP_ATOMIC);
431
432 if (!hna_global_entry)
433 break;
434
435 memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);
436
437 bat_dbg(DBG_ROUTES, bat_priv,
438 "Creating new global hna entry: "
439 "%pM (via %pM)\n",
440 hna_global_entry->addr, orig_node->orig);
441
442 spin_lock_bh(&bat_priv->hna_ghash_lock);
Marek Lindner7aadf882011-02-18 12:28:09 +0000443 hash_add(bat_priv->hna_global_hash, compare_ghna,
444 choose_orig, hna_global_entry,
445 &hna_global_entry->hash_entry);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000446
447 }
448
449 hna_global_entry->orig_node = orig_node;
450 spin_unlock_bh(&bat_priv->hna_ghash_lock);
451
452 /* remove address from local hash if present */
453 spin_lock_bh(&bat_priv->hna_lhash_lock);
454
455 hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
Marek Lindner7aadf882011-02-18 12:28:09 +0000456 hna_local_entry = hna_local_hash_find(bat_priv, hna_ptr);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000457
458 if (hna_local_entry)
459 hna_local_del(bat_priv, hna_local_entry,
460 "global hna received");
461
462 spin_unlock_bh(&bat_priv->hna_lhash_lock);
463
464 hna_buff_count++;
465 }
466
467 /* initialize, and overwrite if malloc succeeds */
468 orig_node->hna_buff = NULL;
469 orig_node->hna_buff_len = 0;
470
471 if (hna_buff_len > 0) {
472 orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC);
473 if (orig_node->hna_buff) {
474 memcpy(orig_node->hna_buff, hna_buff, hna_buff_len);
475 orig_node->hna_buff_len = hna_buff_len;
476 }
477 }
478}
479
/* debugfs/seq_file output: list all global HNA addresses together with
 * the originator each one was announced by */
int hna_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->hna_global_hash;
	struct hna_global_entry *hna_global_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	size_t buf_size, pos;
	char *buff;
	int i, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
				 "specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
		   net_dev->name);

	spin_lock_bh(&bat_priv->hna_ghash_lock);

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		__hlist_for_each_rcu(node, head)
			buf_size += 43;
		rcu_read_unlock();
	}

	/* GFP_ATOMIC because a spinlock is held; holding the lock also
	 * keeps the entry count stable between estimate and copy */
	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->hna_ghash_lock);
		ret = -ENOMEM;
		goto out;
	}
	buff[0] = '\0';
	pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(hna_global_entry, node,
					 head, hash_entry) {
			/* 44 = line estimate above + terminating NUL */
			pos += snprintf(buff + pos, 44,
					" * %pM via %pM\n",
					hna_global_entry->addr,
					hna_global_entry->orig_node->orig);
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->hna_ghash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}
556
/* unlink a global HNA entry from the hash and free it.
 * NOTE(review): callers appear to hold hna_ghash_lock - confirm */
static void _hna_global_del_orig(struct bat_priv *bat_priv,
				 struct hna_global_entry *hna_global_entry,
				 char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv,
		"Deleting global hna entry %pM (via %pM): %s\n",
		hna_global_entry->addr, hna_global_entry->orig_node->orig,
		message);

	/* remove from the hash first, then free */
	hash_remove(bat_priv->hna_global_hash, compare_ghna, choose_orig,
		    hna_global_entry->addr);
	kfree(hna_global_entry);
}
570
/* remove every global HNA entry that @orig_node announced (walking its
 * cached hna buffer) and free that buffer */
void hna_global_del_orig(struct bat_priv *bat_priv,
			 struct orig_node *orig_node, char *message)
{
	struct hna_global_entry *hna_global_entry;
	int hna_buff_count = 0;
	unsigned char *hna_ptr;

	if (orig_node->hna_buff_len == 0)
		return;

	spin_lock_bh(&bat_priv->hna_ghash_lock);

	while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
		hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
		hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);

		/* only delete the entry if it still belongs to this
		 * originator - it may have been taken over meanwhile */
		if ((hna_global_entry) &&
		    (hna_global_entry->orig_node == orig_node))
			_hna_global_del_orig(bat_priv, hna_global_entry,
					     message);

		hna_buff_count++;
	}

	spin_unlock_bh(&bat_priv->hna_ghash_lock);

	orig_node->hna_buff_len = 0;
	kfree(orig_node->hna_buff);
	orig_node->hna_buff = NULL;
}
601
Marek Lindner7aadf882011-02-18 12:28:09 +0000602static void hna_global_del(struct hlist_node *node, void *arg)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000603{
Marek Lindner7aadf882011-02-18 12:28:09 +0000604 void *data = container_of(node, struct hna_global_entry, hash_entry);
605
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000606 kfree(data);
607}
608
/* tear down the global HNA table and free all remaining entries */
void hna_global_free(struct bat_priv *bat_priv)
{
	if (!bat_priv->hna_global_hash)
		return;

	hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL);
	bat_priv->hna_global_hash = NULL;
}
617
/* map a mac address to the originator that announced it; returns the
 * orig_node with its refcount increased (the caller is responsible for
 * dropping that reference) or NULL if unknown or being freed */
struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
{
	struct hna_global_entry *hna_global_entry;
	struct orig_node *orig_node = NULL;

	spin_lock_bh(&bat_priv->hna_ghash_lock);
	hna_global_entry = hna_global_hash_find(bat_priv, addr);

	if (!hna_global_entry)
		goto out;

	/* refuse to hand out an orig_node whose refcount already hit zero
	 * (i.e. it is in the process of being freed) */
	if (!atomic_inc_not_zero(&hna_global_entry->orig_node->refcount))
		goto out;

	orig_node = hna_global_entry->orig_node;

out:
	spin_unlock_bh(&bat_priv->hna_ghash_lock);
	return orig_node;
}