Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 1 | /* |
Sven Eckelmann | 64afe35 | 2011-01-27 10:38:15 +0100 | [diff] [blame] | 2 | * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 3 | * |
| 4 | * Marek Lindner, Simon Wunderlich |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of version 2 of the GNU General Public |
| 8 | * License as published by the Free Software Foundation. |
| 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, but |
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 13 | * General Public License for more details. |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License |
| 16 | * along with this program; if not, write to the Free Software |
| 17 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
| 18 | * 02110-1301, USA |
| 19 | * |
| 20 | */ |
| 21 | |
| 22 | #include "main.h" |
| 23 | #include "translation-table.h" |
| 24 | #include "soft-interface.h" |
Marek Lindner | 32ae9b2 | 2011-04-20 15:40:58 +0200 | [diff] [blame] | 25 | #include "hard-interface.h" |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 26 | #include "send.h" |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 27 | #include "hash.h" |
| 28 | #include "originator.h" |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 29 | #include "routing.h" |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 30 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 31 | #include <linux/crc16.h> |
| 32 | |
| 33 | static void _tt_global_del(struct bat_priv *bat_priv, |
| 34 | struct tt_global_entry *tt_global_entry, |
| 35 | const char *message); |
| 36 | static void tt_purge(struct work_struct *work); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 37 | |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 38 | /* returns 1 if they are the same mac addr */ |
Sven Eckelmann | 747e422 | 2011-05-14 23:14:50 +0200 | [diff] [blame] | 39 | static int compare_ltt(const struct hlist_node *node, const void *data2) |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 40 | { |
Sven Eckelmann | 747e422 | 2011-05-14 23:14:50 +0200 | [diff] [blame] | 41 | const void *data1 = container_of(node, struct tt_local_entry, |
| 42 | hash_entry); |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 43 | |
| 44 | return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); |
| 45 | } |
| 46 | |
| 47 | /* returns 1 if they are the same mac addr */ |
Sven Eckelmann | 747e422 | 2011-05-14 23:14:50 +0200 | [diff] [blame] | 48 | static int compare_gtt(const struct hlist_node *node, const void *data2) |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 49 | { |
Sven Eckelmann | 747e422 | 2011-05-14 23:14:50 +0200 | [diff] [blame] | 50 | const void *data1 = container_of(node, struct tt_global_entry, |
| 51 | hash_entry); |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 52 | |
| 53 | return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); |
| 54 | } |
| 55 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 56 | static void tt_start_timer(struct bat_priv *bat_priv) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 57 | { |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 58 | INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge); |
| 59 | queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, |
| 60 | msecs_to_jiffies(5000)); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 61 | } |
| 62 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 63 | static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv, |
Sven Eckelmann | 747e422 | 2011-05-14 23:14:50 +0200 | [diff] [blame] | 64 | const void *data) |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 65 | { |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 66 | struct hashtable_t *hash = bat_priv->tt_local_hash; |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 67 | struct hlist_head *head; |
| 68 | struct hlist_node *node; |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 69 | struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL; |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 70 | int index; |
| 71 | |
| 72 | if (!hash) |
| 73 | return NULL; |
| 74 | |
| 75 | index = choose_orig(data, hash->size); |
| 76 | head = &hash->table[index]; |
| 77 | |
| 78 | rcu_read_lock(); |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 79 | hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) { |
| 80 | if (!compare_eth(tt_local_entry, data)) |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 81 | continue; |
| 82 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 83 | tt_local_entry_tmp = tt_local_entry; |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 84 | break; |
| 85 | } |
| 86 | rcu_read_unlock(); |
| 87 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 88 | return tt_local_entry_tmp; |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 89 | } |
| 90 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 91 | static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv, |
Sven Eckelmann | 747e422 | 2011-05-14 23:14:50 +0200 | [diff] [blame] | 92 | const void *data) |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 93 | { |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 94 | struct hashtable_t *hash = bat_priv->tt_global_hash; |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 95 | struct hlist_head *head; |
| 96 | struct hlist_node *node; |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 97 | struct tt_global_entry *tt_global_entry; |
| 98 | struct tt_global_entry *tt_global_entry_tmp = NULL; |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 99 | int index; |
| 100 | |
| 101 | if (!hash) |
| 102 | return NULL; |
| 103 | |
| 104 | index = choose_orig(data, hash->size); |
| 105 | head = &hash->table[index]; |
| 106 | |
| 107 | rcu_read_lock(); |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 108 | hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) { |
| 109 | if (!compare_eth(tt_global_entry, data)) |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 110 | continue; |
| 111 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 112 | tt_global_entry_tmp = tt_global_entry; |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 113 | break; |
| 114 | } |
| 115 | rcu_read_unlock(); |
| 116 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 117 | return tt_global_entry_tmp; |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 118 | } |
| 119 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 120 | static bool is_out_of_time(unsigned long starting_time, unsigned long timeout) |
| 121 | { |
| 122 | unsigned long deadline; |
| 123 | deadline = starting_time + msecs_to_jiffies(timeout); |
| 124 | |
| 125 | return time_after(jiffies, deadline); |
| 126 | } |
| 127 | |
| 128 | static void tt_local_event(struct bat_priv *bat_priv, uint8_t op, |
| 129 | const uint8_t *addr) |
| 130 | { |
| 131 | struct tt_change_node *tt_change_node; |
| 132 | |
| 133 | tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC); |
| 134 | |
| 135 | if (!tt_change_node) |
| 136 | return; |
| 137 | |
| 138 | tt_change_node->change.flags = op; |
| 139 | memcpy(tt_change_node->change.addr, addr, ETH_ALEN); |
| 140 | |
| 141 | spin_lock_bh(&bat_priv->tt_changes_list_lock); |
| 142 | /* track the change in the OGMinterval list */ |
| 143 | list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list); |
| 144 | atomic_inc(&bat_priv->tt_local_changes); |
| 145 | spin_unlock_bh(&bat_priv->tt_changes_list_lock); |
| 146 | |
| 147 | atomic_set(&bat_priv->tt_ogm_append_cnt, 0); |
| 148 | } |
| 149 | |
| 150 | int tt_len(int changes_num) |
| 151 | { |
| 152 | return changes_num * sizeof(struct tt_change); |
| 153 | } |
| 154 | |
| 155 | static int tt_local_init(struct bat_priv *bat_priv) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 156 | { |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 157 | if (bat_priv->tt_local_hash) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 158 | return 1; |
| 159 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 160 | bat_priv->tt_local_hash = hash_new(1024); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 161 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 162 | if (!bat_priv->tt_local_hash) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 163 | return 0; |
| 164 | |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 165 | return 1; |
| 166 | } |
| 167 | |
/**
 * tt_local_add - register a mac address as locally reachable
 * @soft_iface: the batman soft interface the address was seen on
 * @addr: the client mac address to add
 *
 * If the address is already known it is only refreshed (last_seen).
 * Otherwise a new entry is created, the change is queued for OGM
 * propagation and any matching entry in the global table is removed
 * (the client roamed to us).  Allocation failure is silently ignored.
 *
 * Locking: takes tt_lhash_lock, then (separately) tt_ghash_lock —
 * never both at once.
 */
void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct tt_local_entry *tt_local_entry;
	struct tt_global_entry *tt_global_entry;

	spin_lock_bh(&bat_priv->tt_lhash_lock);
	tt_local_entry = tt_local_hash_find(bat_priv, addr);

	/* already known: just refresh the purge timestamp */
	if (tt_local_entry) {
		tt_local_entry->last_seen = jiffies;
		goto unlock;
	}

	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		goto unlock;

	/* queue the addition for the next OGM diff */
	tt_local_event(bat_priv, NO_FLAGS, addr);

	bat_dbg(DBG_TT, bat_priv,
		"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
		(uint8_t)atomic_read(&bat_priv->ttvn));

	memcpy(tt_local_entry->addr, addr, ETH_ALEN);
	tt_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->never_purge = 1;
	else
		tt_local_entry->never_purge = 0;

	hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
		 tt_local_entry, &tt_local_entry->hash_entry);
	atomic_inc(&bat_priv->num_local_tt);
	spin_unlock_bh(&bat_priv->tt_lhash_lock);

	/* remove address from global hash if present */
	spin_lock_bh(&bat_priv->tt_ghash_lock);

	tt_global_entry = tt_global_hash_find(bat_priv, addr);

	if (tt_global_entry)
		_tt_global_del(bat_priv, tt_global_entry,
			       "local tt received");

	spin_unlock_bh(&bat_priv->tt_ghash_lock);
	return;
unlock:
	spin_unlock_bh(&bat_priv->tt_lhash_lock);
}
| 220 | |
/**
 * tt_changes_fill_buffer - serialize pending local tt changes into @buff
 * @bat_priv: the bat priv with all the soft interface information
 * @buff: destination buffer for the tt_change records
 * @buff_len: size of @buff in bytes
 *
 * Drains bat_priv->tt_changes_list: as many records as fit are copied
 * into @buff; every queued entry is removed and freed regardless of
 * whether it fit.  A copy of the written buffer is retained in
 * bat_priv->tt_buff so a later tt_request can be answered with the
 * same diff (if that kmalloc fails, the full table is sent instead).
 *
 * Returns the number of records the buffer could hold.
 * NOTE(review): this is the buffer capacity, not the count actually
 * copied — callers appear to size @buff from tt_local_changes so the
 * two should match; confirm against callers.
 */
int tt_changes_fill_buffer(struct bat_priv *bat_priv,
			   unsigned char *buff, int buff_len)
{
	int count = 0, tot_changes = 0;
	struct tt_change_node *entry, *safe;

	if (buff_len > 0)
		tot_changes = buff_len / tt_len(1);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	atomic_set(&bat_priv->tt_local_changes, 0);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
			list) {
		if (count < tot_changes) {
			memcpy(buff + tt_len(count),
			       &entry->change, sizeof(struct tt_change));
			count++;
		}
		/* changes that did not fit are dropped, not re-queued */
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	/* Keep the buffer for possible tt_request */
	spin_lock_bh(&bat_priv->tt_buff_lock);
	kfree(bat_priv->tt_buff);
	bat_priv->tt_buff_len = 0;
	bat_priv->tt_buff = NULL;
	/* We check whether this new OGM has no changes due to size
	 * problems */
	if (buff_len > 0) {
		/**
		 * if kmalloc() fails we will reply with the full table
		 * instead of providing the diff
		 */
		bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
		if (bat_priv->tt_buff) {
			memcpy(bat_priv->tt_buff, buff, buff_len);
			bat_priv->tt_buff_len = buff_len;
		}
	}
	spin_unlock_bh(&bat_priv->tt_buff_lock);

	return tot_changes;
}
| 267 | |
/**
 * tt_local_seq_print_text - debugfs dump of the local translation table
 * @seq: seq_file to print into
 * @offset: unused seq_file offset cookie
 *
 * Prints one " * xx:xx:xx:xx:xx:xx" line per local tt entry.  The table
 * is first sized, then formatted into a temporary buffer; both passes
 * run under tt_lhash_lock so the counts cannot change in between.
 *
 * Returns 0 on success, -ENOMEM if the temporary buffer cannot be
 * allocated, or the seq_printf result for the "disabled" messages.
 */
int tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	size_t buf_size, pos;
	char *buff;
	int i, ret = 0;

	/* a mesh without a primary interface has nothing to report */
	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "Locally retrieved addresses (from %s) "
		   "announced via TT (TTVN: %u):\n",
		   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));

	spin_lock_bh(&bat_priv->tt_lhash_lock);

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		__hlist_for_each_rcu(node, head)
			buf_size += 21;
		rcu_read_unlock();
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->tt_lhash_lock);
		ret = -ENOMEM;
		goto out;
	}

	buff[0] = '\0';
	pos = 0;

	/* second pass: format every entry into the pre-sized buffer */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			pos += snprintf(buff + pos, 22, " * %pM\n",
					tt_local_entry->addr);
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->tt_lhash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);
out:
	/* drop the reference taken by primary_if_get_selected() */
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}
| 344 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 345 | static void tt_local_entry_free(struct hlist_node *node, void *arg) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 346 | { |
Sven Eckelmann | 5f718c2 | 2011-05-14 23:14:52 +0200 | [diff] [blame] | 347 | struct bat_priv *bat_priv = arg; |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 348 | void *data = container_of(node, struct tt_local_entry, hash_entry); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 349 | |
| 350 | kfree(data); |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 351 | atomic_dec(&bat_priv->num_local_tt); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 352 | } |
| 353 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 354 | static void tt_local_del(struct bat_priv *bat_priv, |
Sven Eckelmann | 747e422 | 2011-05-14 23:14:50 +0200 | [diff] [blame] | 355 | struct tt_local_entry *tt_local_entry, |
| 356 | const char *message) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 357 | { |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 358 | bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry (%pM): %s\n", |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 359 | tt_local_entry->addr, message); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 360 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 361 | atomic_dec(&bat_priv->num_local_tt); |
| 362 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 363 | hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig, |
| 364 | tt_local_entry->addr); |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 365 | |
| 366 | tt_local_entry_free(&tt_local_entry->hash_entry, bat_priv); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 367 | } |
| 368 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 369 | void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr, |
| 370 | const char *message) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 371 | { |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 372 | struct tt_local_entry *tt_local_entry; |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 373 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 374 | spin_lock_bh(&bat_priv->tt_lhash_lock); |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 375 | tt_local_entry = tt_local_hash_find(bat_priv, addr); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 376 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 377 | if (tt_local_entry) { |
| 378 | tt_local_event(bat_priv, TT_CHANGE_DEL, tt_local_entry->addr); |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 379 | tt_local_del(bat_priv, tt_local_entry, message); |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 380 | } |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 381 | spin_unlock_bh(&bat_priv->tt_lhash_lock); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 382 | } |
| 383 | |
/**
 * tt_local_purge - drop local tt entries that have not been seen recently
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Walks the whole local hash under tt_lhash_lock and deletes every entry
 * whose last_seen is older than TT_LOCAL_TIMEOUT seconds, announcing
 * each deletion via a TT_CHANGE_DEL event.  Entries flagged never_purge
 * (the soft interface's own mac address) are always kept.
 */
static void tt_local_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	int i;

	spin_lock_bh(&bat_priv->tt_lhash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		/* _safe variant: tt_local_del() unlinks the current node */
		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
					  head, hash_entry) {
			if (tt_local_entry->never_purge)
				continue;

			/* TT_LOCAL_TIMEOUT is in seconds, converted here
			 * to the milliseconds is_out_of_time() expects */
			if (!is_out_of_time(tt_local_entry->last_seen,
					    TT_LOCAL_TIMEOUT * 1000))
				continue;

			tt_local_event(bat_priv, TT_CHANGE_DEL,
				       tt_local_entry->addr);
			tt_local_del(bat_priv, tt_local_entry,
				     "address timed out");
		}
	}

	spin_unlock_bh(&bat_priv->tt_lhash_lock);
}
| 415 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 416 | static void tt_local_table_free(struct bat_priv *bat_priv) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 417 | { |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 418 | struct hashtable_t *hash; |
| 419 | int i; |
| 420 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
| 421 | struct hlist_head *head; |
| 422 | struct hlist_node *node, *node_tmp; |
| 423 | struct tt_local_entry *tt_local_entry; |
| 424 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 425 | if (!bat_priv->tt_local_hash) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 426 | return; |
| 427 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 428 | hash = bat_priv->tt_local_hash; |
| 429 | |
| 430 | for (i = 0; i < hash->size; i++) { |
| 431 | head = &hash->table[i]; |
| 432 | list_lock = &hash->list_locks[i]; |
| 433 | |
| 434 | spin_lock_bh(list_lock); |
| 435 | hlist_for_each_entry_safe(tt_local_entry, node, node_tmp, |
| 436 | head, hash_entry) { |
| 437 | hlist_del_rcu(node); |
| 438 | kfree(tt_local_entry); |
| 439 | } |
| 440 | spin_unlock_bh(list_lock); |
| 441 | } |
| 442 | |
| 443 | hash_destroy(hash); |
| 444 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 445 | bat_priv->tt_local_hash = NULL; |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 446 | } |
| 447 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 448 | static int tt_global_init(struct bat_priv *bat_priv) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 449 | { |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 450 | if (bat_priv->tt_global_hash) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 451 | return 1; |
| 452 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 453 | bat_priv->tt_global_hash = hash_new(1024); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 454 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 455 | if (!bat_priv->tt_global_hash) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 456 | return 0; |
| 457 | |
| 458 | return 1; |
| 459 | } |
| 460 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 461 | static void tt_changes_list_free(struct bat_priv *bat_priv) |
| 462 | { |
| 463 | struct tt_change_node *entry, *safe; |
| 464 | |
| 465 | spin_lock_bh(&bat_priv->tt_changes_list_lock); |
| 466 | |
| 467 | list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, |
| 468 | list) { |
| 469 | list_del(&entry->list); |
| 470 | kfree(entry); |
| 471 | } |
| 472 | |
| 473 | atomic_set(&bat_priv->tt_local_changes, 0); |
| 474 | spin_unlock_bh(&bat_priv->tt_changes_list_lock); |
| 475 | } |
| 476 | |
| 477 | /* caller must hold orig_node refcount */ |
| 478 | int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, |
| 479 | const unsigned char *tt_addr, uint8_t ttvn) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 480 | { |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 481 | struct tt_global_entry *tt_global_entry; |
| 482 | struct tt_local_entry *tt_local_entry; |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 483 | struct orig_node *orig_node_tmp; |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 484 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 485 | spin_lock_bh(&bat_priv->tt_ghash_lock); |
| 486 | tt_global_entry = tt_global_hash_find(bat_priv, tt_addr); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 487 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 488 | if (!tt_global_entry) { |
| 489 | tt_global_entry = |
| 490 | kmalloc(sizeof(*tt_global_entry), |
| 491 | GFP_ATOMIC); |
| 492 | if (!tt_global_entry) |
| 493 | goto unlock; |
| 494 | memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN); |
| 495 | /* Assign the new orig_node */ |
| 496 | atomic_inc(&orig_node->refcount); |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 497 | tt_global_entry->orig_node = orig_node; |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 498 | tt_global_entry->ttvn = ttvn; |
| 499 | atomic_inc(&orig_node->tt_size); |
| 500 | hash_add(bat_priv->tt_global_hash, compare_gtt, |
| 501 | choose_orig, tt_global_entry, |
| 502 | &tt_global_entry->hash_entry); |
| 503 | } else { |
| 504 | if (tt_global_entry->orig_node != orig_node) { |
| 505 | atomic_dec(&tt_global_entry->orig_node->tt_size); |
| 506 | orig_node_tmp = tt_global_entry->orig_node; |
| 507 | atomic_inc(&orig_node->refcount); |
| 508 | tt_global_entry->orig_node = orig_node; |
| 509 | tt_global_entry->ttvn = ttvn; |
| 510 | orig_node_free_ref(orig_node_tmp); |
| 511 | atomic_inc(&orig_node->tt_size); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 512 | } |
| 513 | } |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 514 | |
| 515 | spin_unlock_bh(&bat_priv->tt_ghash_lock); |
| 516 | |
| 517 | bat_dbg(DBG_TT, bat_priv, |
| 518 | "Creating new global tt entry: %pM (via %pM)\n", |
| 519 | tt_global_entry->addr, orig_node->orig); |
| 520 | |
| 521 | /* remove address from local hash if present */ |
| 522 | spin_lock_bh(&bat_priv->tt_lhash_lock); |
| 523 | tt_local_entry = tt_local_hash_find(bat_priv, tt_addr); |
| 524 | |
| 525 | if (tt_local_entry) |
| 526 | tt_local_del(bat_priv, tt_local_entry, |
| 527 | "global tt received"); |
| 528 | spin_unlock_bh(&bat_priv->tt_lhash_lock); |
| 529 | return 1; |
| 530 | unlock: |
| 531 | spin_unlock_bh(&bat_priv->tt_ghash_lock); |
| 532 | return 0; |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 533 | } |
| 534 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 535 | int tt_global_seq_print_text(struct seq_file *seq, void *offset) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 536 | { |
| 537 | struct net_device *net_dev = (struct net_device *)seq->private; |
| 538 | struct bat_priv *bat_priv = netdev_priv(net_dev); |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 539 | struct hashtable_t *hash = bat_priv->tt_global_hash; |
| 540 | struct tt_global_entry *tt_global_entry; |
Marek Lindner | 32ae9b2 | 2011-04-20 15:40:58 +0200 | [diff] [blame] | 541 | struct hard_iface *primary_if; |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 542 | struct hlist_node *node; |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 543 | struct hlist_head *head; |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 544 | size_t buf_size, pos; |
| 545 | char *buff; |
Marek Lindner | 32ae9b2 | 2011-04-20 15:40:58 +0200 | [diff] [blame] | 546 | int i, ret = 0; |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 547 | |
Marek Lindner | 32ae9b2 | 2011-04-20 15:40:58 +0200 | [diff] [blame] | 548 | primary_if = primary_if_get_selected(bat_priv); |
| 549 | if (!primary_if) { |
| 550 | ret = seq_printf(seq, "BATMAN mesh %s disabled - please " |
| 551 | "specify interfaces to enable it\n", |
| 552 | net_dev->name); |
| 553 | goto out; |
| 554 | } |
| 555 | |
| 556 | if (primary_if->if_status != IF_ACTIVE) { |
| 557 | ret = seq_printf(seq, "BATMAN mesh %s disabled - " |
| 558 | "primary interface not active\n", |
| 559 | net_dev->name); |
| 560 | goto out; |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 561 | } |
| 562 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 563 | seq_printf(seq, |
| 564 | "Globally announced TT entries received via the mesh %s\n", |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 565 | net_dev->name); |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 566 | seq_printf(seq, " %-13s %s %-15s %s\n", |
| 567 | "Client", "(TTVN)", "Originator", "(Curr TTVN)"); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 568 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 569 | spin_lock_bh(&bat_priv->tt_ghash_lock); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 570 | |
| 571 | buf_size = 1; |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 572 | /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via |
| 573 | * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/ |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 574 | for (i = 0; i < hash->size; i++) { |
| 575 | head = &hash->table[i]; |
| 576 | |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 577 | rcu_read_lock(); |
| 578 | __hlist_for_each_rcu(node, head) |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 579 | buf_size += 59; |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 580 | rcu_read_unlock(); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 581 | } |
| 582 | |
| 583 | buff = kmalloc(buf_size, GFP_ATOMIC); |
| 584 | if (!buff) { |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 585 | spin_unlock_bh(&bat_priv->tt_ghash_lock); |
Marek Lindner | 32ae9b2 | 2011-04-20 15:40:58 +0200 | [diff] [blame] | 586 | ret = -ENOMEM; |
| 587 | goto out; |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 588 | } |
| 589 | buff[0] = '\0'; |
| 590 | pos = 0; |
| 591 | |
| 592 | for (i = 0; i < hash->size; i++) { |
| 593 | head = &hash->table[i]; |
| 594 | |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 595 | rcu_read_lock(); |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 596 | hlist_for_each_entry_rcu(tt_global_entry, node, |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 597 | head, hash_entry) { |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 598 | pos += snprintf(buff + pos, 61, |
| 599 | " * %pM (%3u) via %pM (%3u)\n", |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 600 | tt_global_entry->addr, |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 601 | tt_global_entry->ttvn, |
| 602 | tt_global_entry->orig_node->orig, |
| 603 | (uint8_t) atomic_read( |
| 604 | &tt_global_entry->orig_node-> |
| 605 | last_ttvn)); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 606 | } |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 607 | rcu_read_unlock(); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 608 | } |
| 609 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 610 | spin_unlock_bh(&bat_priv->tt_ghash_lock); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 611 | |
| 612 | seq_printf(seq, "%s", buff); |
| 613 | kfree(buff); |
Marek Lindner | 32ae9b2 | 2011-04-20 15:40:58 +0200 | [diff] [blame] | 614 | out: |
| 615 | if (primary_if) |
| 616 | hardif_free_ref(primary_if); |
| 617 | return ret; |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 618 | } |
| 619 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 620 | static void _tt_global_del(struct bat_priv *bat_priv, |
| 621 | struct tt_global_entry *tt_global_entry, |
| 622 | const char *message) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 623 | { |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 624 | if (!tt_global_entry) |
| 625 | return; |
| 626 | |
| 627 | bat_dbg(DBG_TT, bat_priv, |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 628 | "Deleting global tt entry %pM (via %pM): %s\n", |
| 629 | tt_global_entry->addr, tt_global_entry->orig_node->orig, |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 630 | message); |
| 631 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 632 | atomic_dec(&tt_global_entry->orig_node->tt_size); |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 633 | hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig, |
| 634 | tt_global_entry->addr); |
| 635 | kfree(tt_global_entry); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 636 | } |
| 637 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 638 | void tt_global_del(struct bat_priv *bat_priv, |
| 639 | struct orig_node *orig_node, const unsigned char *addr, |
| 640 | const char *message) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 641 | { |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 642 | struct tt_global_entry *tt_global_entry; |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 643 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 644 | spin_lock_bh(&bat_priv->tt_ghash_lock); |
| 645 | tt_global_entry = tt_global_hash_find(bat_priv, addr); |
| 646 | |
| 647 | if (tt_global_entry && tt_global_entry->orig_node == orig_node) { |
| 648 | atomic_dec(&orig_node->tt_size); |
| 649 | _tt_global_del(bat_priv, tt_global_entry, message); |
| 650 | } |
| 651 | spin_unlock_bh(&bat_priv->tt_ghash_lock); |
| 652 | } |
| 653 | |
/* Purge every global TT entry announced by @orig_node (used when the
 * originator disappears or resets its table) and zero its entry count.
 * Walks the whole hash with the _safe iterator because _tt_global_del()
 * removes the current node from the chain. */
void tt_global_del_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node, const char *message)
{
	struct tt_global_entry *tt_global_entry;
	int i;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct hlist_node *node, *safe;
	struct hlist_head *head;

	/* table may already have been torn down on shutdown */
	if (!bat_priv->tt_global_hash)
		return;

	spin_lock_bh(&bat_priv->tt_ghash_lock);
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_safe(tt_global_entry, node, safe,
					  head, hash_entry) {
			if (tt_global_entry->orig_node == orig_node)
				_tt_global_del(bat_priv, tt_global_entry,
					       message);
		}
	}
	/* _tt_global_del() decremented per entry; force the counter to zero
	 * to resynchronize it in any case */
	atomic_set(&orig_node->tt_size, 0);

	spin_unlock_bh(&bat_priv->tt_ghash_lock);
}
| 681 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 682 | static void tt_global_entry_free(struct hlist_node *node, void *arg) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 683 | { |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 684 | void *data = container_of(node, struct tt_global_entry, hash_entry); |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 685 | kfree(data); |
| 686 | } |
| 687 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 688 | static void tt_global_table_free(struct bat_priv *bat_priv) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 689 | { |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 690 | if (!bat_priv->tt_global_hash) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 691 | return; |
| 692 | |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 693 | hash_delete(bat_priv->tt_global_hash, tt_global_entry_free, NULL); |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 694 | bat_priv->tt_global_hash = NULL; |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 695 | } |
| 696 | |
Sven Eckelmann | 747e422 | 2011-05-14 23:14:50 +0200 | [diff] [blame] | 697 | struct orig_node *transtable_search(struct bat_priv *bat_priv, |
| 698 | const uint8_t *addr) |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 699 | { |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 700 | struct tt_global_entry *tt_global_entry; |
Marek Lindner | 7b36e8e | 2011-02-18 12:28:10 +0000 | [diff] [blame] | 701 | struct orig_node *orig_node = NULL; |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 702 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 703 | spin_lock_bh(&bat_priv->tt_ghash_lock); |
| 704 | tt_global_entry = tt_global_hash_find(bat_priv, addr); |
Marek Lindner | 7aadf88 | 2011-02-18 12:28:09 +0000 | [diff] [blame] | 705 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 706 | if (!tt_global_entry) |
Marek Lindner | 7b36e8e | 2011-02-18 12:28:10 +0000 | [diff] [blame] | 707 | goto out; |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 708 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 709 | if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount)) |
Marek Lindner | 7b36e8e | 2011-02-18 12:28:10 +0000 | [diff] [blame] | 710 | goto out; |
| 711 | |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 712 | orig_node = tt_global_entry->orig_node; |
Marek Lindner | 7b36e8e | 2011-02-18 12:28:10 +0000 | [diff] [blame] | 713 | |
| 714 | out: |
Antonio Quartulli | 2dafb49 | 2011-05-05 08:42:45 +0200 | [diff] [blame] | 715 | spin_unlock_bh(&bat_priv->tt_ghash_lock); |
Marek Lindner | 7b36e8e | 2011-02-18 12:28:10 +0000 | [diff] [blame] | 716 | return orig_node; |
Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 717 | } |
Antonio Quartulli | a73105b | 2011-04-27 14:27:44 +0200 | [diff] [blame^] | 718 | |
| 719 | /* Calculates the checksum of the local table of a given orig_node */ |
| 720 | uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node) |
| 721 | { |
| 722 | uint16_t total = 0, total_one; |
| 723 | struct hashtable_t *hash = bat_priv->tt_global_hash; |
| 724 | struct tt_global_entry *tt_global_entry; |
| 725 | struct hlist_node *node; |
| 726 | struct hlist_head *head; |
| 727 | int i, j; |
| 728 | |
| 729 | for (i = 0; i < hash->size; i++) { |
| 730 | head = &hash->table[i]; |
| 731 | |
| 732 | rcu_read_lock(); |
| 733 | hlist_for_each_entry_rcu(tt_global_entry, node, |
| 734 | head, hash_entry) { |
| 735 | if (compare_eth(tt_global_entry->orig_node, |
| 736 | orig_node)) { |
| 737 | total_one = 0; |
| 738 | for (j = 0; j < ETH_ALEN; j++) |
| 739 | total_one = crc16_byte(total_one, |
| 740 | tt_global_entry->addr[j]); |
| 741 | total ^= total_one; |
| 742 | } |
| 743 | } |
| 744 | rcu_read_unlock(); |
| 745 | } |
| 746 | |
| 747 | return total; |
| 748 | } |
| 749 | |
| 750 | /* Calculates the checksum of the local table */ |
| 751 | uint16_t tt_local_crc(struct bat_priv *bat_priv) |
| 752 | { |
| 753 | uint16_t total = 0, total_one; |
| 754 | struct hashtable_t *hash = bat_priv->tt_local_hash; |
| 755 | struct tt_local_entry *tt_local_entry; |
| 756 | struct hlist_node *node; |
| 757 | struct hlist_head *head; |
| 758 | int i, j; |
| 759 | |
| 760 | for (i = 0; i < hash->size; i++) { |
| 761 | head = &hash->table[i]; |
| 762 | |
| 763 | rcu_read_lock(); |
| 764 | hlist_for_each_entry_rcu(tt_local_entry, node, |
| 765 | head, hash_entry) { |
| 766 | total_one = 0; |
| 767 | for (j = 0; j < ETH_ALEN; j++) |
| 768 | total_one = crc16_byte(total_one, |
| 769 | tt_local_entry->addr[j]); |
| 770 | total ^= total_one; |
| 771 | } |
| 772 | |
| 773 | rcu_read_unlock(); |
| 774 | } |
| 775 | |
| 776 | return total; |
| 777 | } |
| 778 | |
| 779 | static void tt_req_list_free(struct bat_priv *bat_priv) |
| 780 | { |
| 781 | struct tt_req_node *node, *safe; |
| 782 | |
| 783 | spin_lock_bh(&bat_priv->tt_req_list_lock); |
| 784 | |
| 785 | list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { |
| 786 | list_del(&node->list); |
| 787 | kfree(node); |
| 788 | } |
| 789 | |
| 790 | spin_unlock_bh(&bat_priv->tt_req_list_lock); |
| 791 | } |
| 792 | |
| 793 | void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node, |
| 794 | const unsigned char *tt_buff, uint8_t tt_num_changes) |
| 795 | { |
| 796 | uint16_t tt_buff_len = tt_len(tt_num_changes); |
| 797 | |
| 798 | /* Replace the old buffer only if I received something in the |
| 799 | * last OGM (the OGM could carry no changes) */ |
| 800 | spin_lock_bh(&orig_node->tt_buff_lock); |
| 801 | if (tt_buff_len > 0) { |
| 802 | kfree(orig_node->tt_buff); |
| 803 | orig_node->tt_buff_len = 0; |
| 804 | orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC); |
| 805 | if (orig_node->tt_buff) { |
| 806 | memcpy(orig_node->tt_buff, tt_buff, tt_buff_len); |
| 807 | orig_node->tt_buff_len = tt_buff_len; |
| 808 | } |
| 809 | } |
| 810 | spin_unlock_bh(&orig_node->tt_buff_lock); |
| 811 | } |
| 812 | |
| 813 | static void tt_req_purge(struct bat_priv *bat_priv) |
| 814 | { |
| 815 | struct tt_req_node *node, *safe; |
| 816 | |
| 817 | spin_lock_bh(&bat_priv->tt_req_list_lock); |
| 818 | list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { |
| 819 | if (is_out_of_time(node->issued_at, |
| 820 | TT_REQUEST_TIMEOUT * 1000)) { |
| 821 | list_del(&node->list); |
| 822 | kfree(node); |
| 823 | } |
| 824 | } |
| 825 | spin_unlock_bh(&bat_priv->tt_req_list_lock); |
| 826 | } |
| 827 | |
/* returns the pointer to the new tt_req_node struct if no request
 * has already been issued for this orig_node, NULL otherwise */
static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
					  struct orig_node *orig_node)
{
	struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;

	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
		/* NOTE(review): compare_eth() is fed the struct pointers
		 * themselves — this only works if tt_req_node.addr and
		 * orig_node.orig are the FIRST members of their respective
		 * structs; confirm that layout assumption holds */
		if (compare_eth(tt_req_node_tmp, orig_node) &&
		    !is_out_of_time(tt_req_node_tmp->issued_at,
				    TT_REQUEST_TIMEOUT * 1000))
			goto unlock;
	}

	tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
	if (!tt_req_node)
		goto unlock;

	memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
	tt_req_node->issued_at = jiffies;

	list_add(&tt_req_node->list, &bat_priv->tt_req_list);
unlock:
	spin_unlock_bh(&bat_priv->tt_req_list_lock);
	return tt_req_node;
}
| 855 | |
| 856 | static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr) |
| 857 | { |
| 858 | const struct tt_global_entry *tt_global_entry = entry_ptr; |
| 859 | const struct orig_node *orig_node = data_ptr; |
| 860 | |
| 861 | return (tt_global_entry->orig_node == orig_node); |
| 862 | } |
| 863 | |
/* Build a TT_RESPONSE skb containing up to tt_len bytes of tt_change
 * records taken from @hash. Entries are optionally filtered through
 * @valid_cb (called with @cb_data); a NULL callback accepts everything.
 * tt_len is clamped so the packet fits the primary interface's MTU.
 * Returns the skb (caller fills the remaining header fields and sends it)
 * or NULL on allocation failure. */
static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
					      struct hashtable_t *hash,
					      struct hard_iface *primary_if,
					      int (*valid_cb)(const void *,
							      const void *),
					      void *cb_data)
{
	struct tt_local_entry *tt_local_entry;
	struct tt_query_packet *tt_response;
	struct tt_change *tt_change;
	struct hlist_node *node;
	struct hlist_head *head;
	struct sk_buff *skb = NULL;
	uint16_t tt_tot, tt_count;
	ssize_t tt_query_size = sizeof(struct tt_query_packet);
	int i;

	/* shrink the payload to what fits in one unfragmented packet,
	 * rounded down to a whole number of tt_change records */
	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
		tt_len = primary_if->soft_iface->mtu - tt_query_size;
		tt_len -= tt_len % sizeof(struct tt_change);
	}
	tt_tot = tt_len / sizeof(struct tt_change);

	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	tt_response = (struct tt_query_packet *)skb_put(skb,
						tt_query_size + tt_len);
	tt_response->ttvn = ttvn;
	tt_response->tt_data = htons(tt_tot);

	tt_change = (struct tt_change *)(skb->data + tt_query_size);
	tt_count = 0;

	rcu_read_lock();
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			/* NOTE(review): this break only leaves the current
			 * bucket; the outer loop keeps scanning empty-handed.
			 * Also, when valid_cb filters entries out, tt_count
			 * can end up below tt_tot while tt_data still claims
			 * tt_tot records — the unwritten tail of the skb
			 * then goes on the wire uninitialized; confirm
			 * whether the receiver tolerates this */
			if (tt_count == tt_tot)
				break;

			if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data)))
				continue;

			memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN);
			tt_change->flags = NO_FLAGS;

			tt_count++;
			tt_change++;
		}
	}
	rcu_read_unlock();

out:
	return skb;
}
| 924 | |
/* Issue a TT_REQUEST towards @dst_orig_node asking for table version
 * @ttvn (full table if @full_table). A request is only sent if none is
 * already pending for that originator. Returns 0 on success, 1 on
 * failure (in which case the skb and the pending-request record are
 * cleaned up again). */
int send_tt_request(struct bat_priv *bat_priv, struct orig_node *dst_orig_node,
		    uint8_t ttvn, uint16_t tt_crc, bool full_table)
{
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_request;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if;
	struct tt_req_node *tt_req_node = NULL;
	int ret = 1;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
	if (!tt_req_node)
		goto out;

	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	tt_request = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet));

	tt_request->packet_type = BAT_TT_QUERY;
	tt_request->version = COMPAT_VERSION;
	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
	tt_request->ttl = TTL;
	tt_request->ttvn = ttvn;
	/* NOTE(review): tt_crc is stored without htons() here while other
	 * writers of tt_data (e.g. tt_response_fill_table) use htons();
	 * the receive side compares it against a host-order CRC, so this is
	 * consistent only between same-endianness nodes — confirm intended
	 * wire byte order */
	tt_request->tt_data = tt_crc;
	tt_request->flags = TT_REQUEST;

	if (full_table)
		tt_request->flags |= TT_FULL_TABLE;

	neigh_node = orig_node_get_router(dst_orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM "
		"[%c]\n", dst_orig_node->orig, neigh_node->addr,
		(full_table ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	/* on any failure, undo both the skb and the pending-request entry */
	if (ret)
		kfree_skb(skb);
	if (ret && tt_req_node) {
		spin_lock_bh(&bat_priv->tt_req_list_lock);
		list_del(&tt_req_node->list);
		spin_unlock_bh(&bat_priv->tt_req_list_lock);
		kfree(tt_req_node);
	}
	return ret;
}
| 992 | |
| 993 | static bool send_other_tt_response(struct bat_priv *bat_priv, |
| 994 | struct tt_query_packet *tt_request) |
| 995 | { |
| 996 | struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL; |
| 997 | struct neigh_node *neigh_node = NULL; |
| 998 | struct hard_iface *primary_if = NULL; |
| 999 | uint8_t orig_ttvn, req_ttvn, ttvn; |
| 1000 | int ret = false; |
| 1001 | unsigned char *tt_buff; |
| 1002 | bool full_table; |
| 1003 | uint16_t tt_len, tt_tot; |
| 1004 | struct sk_buff *skb = NULL; |
| 1005 | struct tt_query_packet *tt_response; |
| 1006 | |
| 1007 | bat_dbg(DBG_TT, bat_priv, |
| 1008 | "Received TT_REQUEST from %pM for " |
| 1009 | "ttvn: %u (%pM) [%c]\n", tt_request->src, |
| 1010 | tt_request->ttvn, tt_request->dst, |
| 1011 | (tt_request->flags & TT_FULL_TABLE ? 'F' : '.')); |
| 1012 | |
| 1013 | /* Let's get the orig node of the REAL destination */ |
| 1014 | req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst); |
| 1015 | if (!req_dst_orig_node) |
| 1016 | goto out; |
| 1017 | |
| 1018 | res_dst_orig_node = get_orig_node(bat_priv, tt_request->src); |
| 1019 | if (!res_dst_orig_node) |
| 1020 | goto out; |
| 1021 | |
| 1022 | neigh_node = orig_node_get_router(res_dst_orig_node); |
| 1023 | if (!neigh_node) |
| 1024 | goto out; |
| 1025 | |
| 1026 | primary_if = primary_if_get_selected(bat_priv); |
| 1027 | if (!primary_if) |
| 1028 | goto out; |
| 1029 | |
| 1030 | orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); |
| 1031 | req_ttvn = tt_request->ttvn; |
| 1032 | |
| 1033 | /* I have not the requested data */ |
| 1034 | if (orig_ttvn != req_ttvn || |
| 1035 | tt_request->tt_data != req_dst_orig_node->tt_crc) |
| 1036 | goto out; |
| 1037 | |
| 1038 | /* If it has explicitly been requested the full table */ |
| 1039 | if (tt_request->flags & TT_FULL_TABLE || |
| 1040 | !req_dst_orig_node->tt_buff) |
| 1041 | full_table = true; |
| 1042 | else |
| 1043 | full_table = false; |
| 1044 | |
| 1045 | /* In this version, fragmentation is not implemented, then |
| 1046 | * I'll send only one packet with as much TT entries as I can */ |
| 1047 | if (!full_table) { |
| 1048 | spin_lock_bh(&req_dst_orig_node->tt_buff_lock); |
| 1049 | tt_len = req_dst_orig_node->tt_buff_len; |
| 1050 | tt_tot = tt_len / sizeof(struct tt_change); |
| 1051 | |
| 1052 | skb = dev_alloc_skb(sizeof(struct tt_query_packet) + |
| 1053 | tt_len + ETH_HLEN); |
| 1054 | if (!skb) |
| 1055 | goto unlock; |
| 1056 | |
| 1057 | skb_reserve(skb, ETH_HLEN); |
| 1058 | tt_response = (struct tt_query_packet *)skb_put(skb, |
| 1059 | sizeof(struct tt_query_packet) + tt_len); |
| 1060 | tt_response->ttvn = req_ttvn; |
| 1061 | tt_response->tt_data = htons(tt_tot); |
| 1062 | |
| 1063 | tt_buff = skb->data + sizeof(struct tt_query_packet); |
| 1064 | /* Copy the last orig_node's OGM buffer */ |
| 1065 | memcpy(tt_buff, req_dst_orig_node->tt_buff, |
| 1066 | req_dst_orig_node->tt_buff_len); |
| 1067 | |
| 1068 | spin_unlock_bh(&req_dst_orig_node->tt_buff_lock); |
| 1069 | } else { |
| 1070 | tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) * |
| 1071 | sizeof(struct tt_change); |
| 1072 | ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); |
| 1073 | |
| 1074 | skb = tt_response_fill_table(tt_len, ttvn, |
| 1075 | bat_priv->tt_global_hash, |
| 1076 | primary_if, tt_global_valid_entry, |
| 1077 | req_dst_orig_node); |
| 1078 | if (!skb) |
| 1079 | goto out; |
| 1080 | |
| 1081 | tt_response = (struct tt_query_packet *)skb->data; |
| 1082 | } |
| 1083 | |
| 1084 | tt_response->packet_type = BAT_TT_QUERY; |
| 1085 | tt_response->version = COMPAT_VERSION; |
| 1086 | tt_response->ttl = TTL; |
| 1087 | memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN); |
| 1088 | memcpy(tt_response->dst, tt_request->src, ETH_ALEN); |
| 1089 | tt_response->flags = TT_RESPONSE; |
| 1090 | |
| 1091 | if (full_table) |
| 1092 | tt_response->flags |= TT_FULL_TABLE; |
| 1093 | |
| 1094 | bat_dbg(DBG_TT, bat_priv, |
| 1095 | "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n", |
| 1096 | res_dst_orig_node->orig, neigh_node->addr, |
| 1097 | req_dst_orig_node->orig, req_ttvn); |
| 1098 | |
| 1099 | send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); |
| 1100 | ret = true; |
| 1101 | goto out; |
| 1102 | |
| 1103 | unlock: |
| 1104 | spin_unlock_bh(&req_dst_orig_node->tt_buff_lock); |
| 1105 | |
| 1106 | out: |
| 1107 | if (res_dst_orig_node) |
| 1108 | orig_node_free_ref(res_dst_orig_node); |
| 1109 | if (req_dst_orig_node) |
| 1110 | orig_node_free_ref(req_dst_orig_node); |
| 1111 | if (neigh_node) |
| 1112 | neigh_node_free_ref(neigh_node); |
| 1113 | if (primary_if) |
| 1114 | hardif_free_ref(primary_if); |
| 1115 | if (!ret) |
| 1116 | kfree_skb(skb); |
| 1117 | return ret; |
| 1118 | |
| 1119 | } |
| 1120 | static bool send_my_tt_response(struct bat_priv *bat_priv, |
| 1121 | struct tt_query_packet *tt_request) |
| 1122 | { |
| 1123 | struct orig_node *orig_node = NULL; |
| 1124 | struct neigh_node *neigh_node = NULL; |
| 1125 | struct hard_iface *primary_if = NULL; |
| 1126 | uint8_t my_ttvn, req_ttvn, ttvn; |
| 1127 | int ret = false; |
| 1128 | unsigned char *tt_buff; |
| 1129 | bool full_table; |
| 1130 | uint16_t tt_len, tt_tot; |
| 1131 | struct sk_buff *skb = NULL; |
| 1132 | struct tt_query_packet *tt_response; |
| 1133 | |
| 1134 | bat_dbg(DBG_TT, bat_priv, |
| 1135 | "Received TT_REQUEST from %pM for " |
| 1136 | "ttvn: %u (me) [%c]\n", tt_request->src, |
| 1137 | tt_request->ttvn, |
| 1138 | (tt_request->flags & TT_FULL_TABLE ? 'F' : '.')); |
| 1139 | |
| 1140 | |
| 1141 | my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); |
| 1142 | req_ttvn = tt_request->ttvn; |
| 1143 | |
| 1144 | orig_node = get_orig_node(bat_priv, tt_request->src); |
| 1145 | if (!orig_node) |
| 1146 | goto out; |
| 1147 | |
| 1148 | neigh_node = orig_node_get_router(orig_node); |
| 1149 | if (!neigh_node) |
| 1150 | goto out; |
| 1151 | |
| 1152 | primary_if = primary_if_get_selected(bat_priv); |
| 1153 | if (!primary_if) |
| 1154 | goto out; |
| 1155 | |
| 1156 | /* If the full table has been explicitly requested or the gap |
| 1157 | * is too big send the whole local translation table */ |
| 1158 | if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn || |
| 1159 | !bat_priv->tt_buff) |
| 1160 | full_table = true; |
| 1161 | else |
| 1162 | full_table = false; |
| 1163 | |
| 1164 | /* In this version, fragmentation is not implemented, then |
| 1165 | * I'll send only one packet with as much TT entries as I can */ |
| 1166 | if (!full_table) { |
| 1167 | spin_lock_bh(&bat_priv->tt_buff_lock); |
| 1168 | tt_len = bat_priv->tt_buff_len; |
| 1169 | tt_tot = tt_len / sizeof(struct tt_change); |
| 1170 | |
| 1171 | skb = dev_alloc_skb(sizeof(struct tt_query_packet) + |
| 1172 | tt_len + ETH_HLEN); |
| 1173 | if (!skb) |
| 1174 | goto unlock; |
| 1175 | |
| 1176 | skb_reserve(skb, ETH_HLEN); |
| 1177 | tt_response = (struct tt_query_packet *)skb_put(skb, |
| 1178 | sizeof(struct tt_query_packet) + tt_len); |
| 1179 | tt_response->ttvn = req_ttvn; |
| 1180 | tt_response->tt_data = htons(tt_tot); |
| 1181 | |
| 1182 | tt_buff = skb->data + sizeof(struct tt_query_packet); |
| 1183 | memcpy(tt_buff, bat_priv->tt_buff, |
| 1184 | bat_priv->tt_buff_len); |
| 1185 | spin_unlock_bh(&bat_priv->tt_buff_lock); |
| 1186 | } else { |
| 1187 | tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) * |
| 1188 | sizeof(struct tt_change); |
| 1189 | ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); |
| 1190 | |
| 1191 | skb = tt_response_fill_table(tt_len, ttvn, |
| 1192 | bat_priv->tt_local_hash, |
| 1193 | primary_if, NULL, NULL); |
| 1194 | if (!skb) |
| 1195 | goto out; |
| 1196 | |
| 1197 | tt_response = (struct tt_query_packet *)skb->data; |
| 1198 | } |
| 1199 | |
| 1200 | tt_response->packet_type = BAT_TT_QUERY; |
| 1201 | tt_response->version = COMPAT_VERSION; |
| 1202 | tt_response->ttl = TTL; |
| 1203 | memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN); |
| 1204 | memcpy(tt_response->dst, tt_request->src, ETH_ALEN); |
| 1205 | tt_response->flags = TT_RESPONSE; |
| 1206 | |
| 1207 | if (full_table) |
| 1208 | tt_response->flags |= TT_FULL_TABLE; |
| 1209 | |
| 1210 | bat_dbg(DBG_TT, bat_priv, |
| 1211 | "Sending TT_RESPONSE to %pM via %pM [%c]\n", |
| 1212 | orig_node->orig, neigh_node->addr, |
| 1213 | (tt_response->flags & TT_FULL_TABLE ? 'F' : '.')); |
| 1214 | |
| 1215 | send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); |
| 1216 | ret = true; |
| 1217 | goto out; |
| 1218 | |
| 1219 | unlock: |
| 1220 | spin_unlock_bh(&bat_priv->tt_buff_lock); |
| 1221 | out: |
| 1222 | if (orig_node) |
| 1223 | orig_node_free_ref(orig_node); |
| 1224 | if (neigh_node) |
| 1225 | neigh_node_free_ref(neigh_node); |
| 1226 | if (primary_if) |
| 1227 | hardif_free_ref(primary_if); |
| 1228 | if (!ret) |
| 1229 | kfree_skb(skb); |
| 1230 | /* This packet was for me, so it doesn't need to be re-routed */ |
| 1231 | return true; |
| 1232 | } |
| 1233 | |
| 1234 | bool send_tt_response(struct bat_priv *bat_priv, |
| 1235 | struct tt_query_packet *tt_request) |
| 1236 | { |
| 1237 | if (is_my_mac(tt_request->dst)) |
| 1238 | return send_my_tt_response(bat_priv, tt_request); |
| 1239 | else |
| 1240 | return send_other_tt_response(bat_priv, tt_request); |
| 1241 | } |
| 1242 | |
| 1243 | static void _tt_update_changes(struct bat_priv *bat_priv, |
| 1244 | struct orig_node *orig_node, |
| 1245 | struct tt_change *tt_change, |
| 1246 | uint16_t tt_num_changes, uint8_t ttvn) |
| 1247 | { |
| 1248 | int i; |
| 1249 | |
| 1250 | for (i = 0; i < tt_num_changes; i++) { |
| 1251 | if ((tt_change + i)->flags & TT_CHANGE_DEL) |
| 1252 | tt_global_del(bat_priv, orig_node, |
| 1253 | (tt_change + i)->addr, |
| 1254 | "tt removed by changes"); |
| 1255 | else |
| 1256 | if (!tt_global_add(bat_priv, orig_node, |
| 1257 | (tt_change + i)->addr, ttvn)) |
| 1258 | /* In case of problem while storing a |
| 1259 | * global_entry, we stop the updating |
| 1260 | * procedure without committing the |
| 1261 | * ttvn change. This will avoid to send |
| 1262 | * corrupted data on tt_request |
| 1263 | */ |
| 1264 | return; |
| 1265 | } |
| 1266 | } |
| 1267 | |
| 1268 | static void tt_fill_gtable(struct bat_priv *bat_priv, |
| 1269 | struct tt_query_packet *tt_response) |
| 1270 | { |
| 1271 | struct orig_node *orig_node = NULL; |
| 1272 | |
| 1273 | orig_node = orig_hash_find(bat_priv, tt_response->src); |
| 1274 | if (!orig_node) |
| 1275 | goto out; |
| 1276 | |
| 1277 | /* Purge the old table first.. */ |
| 1278 | tt_global_del_orig(bat_priv, orig_node, "Received full table"); |
| 1279 | |
| 1280 | _tt_update_changes(bat_priv, orig_node, |
| 1281 | (struct tt_change *)(tt_response + 1), |
| 1282 | tt_response->tt_data, tt_response->ttvn); |
| 1283 | |
| 1284 | spin_lock_bh(&orig_node->tt_buff_lock); |
| 1285 | kfree(orig_node->tt_buff); |
| 1286 | orig_node->tt_buff_len = 0; |
| 1287 | orig_node->tt_buff = NULL; |
| 1288 | spin_unlock_bh(&orig_node->tt_buff_lock); |
| 1289 | |
| 1290 | atomic_set(&orig_node->last_ttvn, tt_response->ttvn); |
| 1291 | |
| 1292 | out: |
| 1293 | if (orig_node) |
| 1294 | orig_node_free_ref(orig_node); |
| 1295 | } |
| 1296 | |
/* Apply an incremental TT change set received from orig_node, save the raw
 * change buffer so it can be replayed to other nodes on request, then
 * commit the new translation table version number.
 *
 * NOTE(review): _tt_update_changes() can bail out early (e.g. when
 * tt_global_add() fails) without signalling failure, yet the ttvn is
 * still committed below -- confirm this is intended. */
void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
		       uint16_t tt_num_changes, uint8_t ttvn,
		       struct tt_change *tt_change)
{
	_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
			   ttvn);

	/* keep a copy of the changes for later TT_REQUESTs from others */
	tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
			    tt_num_changes);
	atomic_set(&orig_node->last_ttvn, ttvn);
}
| 1308 | |
| 1309 | bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr) |
| 1310 | { |
| 1311 | struct tt_local_entry *tt_local_entry; |
| 1312 | |
| 1313 | spin_lock_bh(&bat_priv->tt_lhash_lock); |
| 1314 | tt_local_entry = tt_local_hash_find(bat_priv, addr); |
| 1315 | spin_unlock_bh(&bat_priv->tt_lhash_lock); |
| 1316 | |
| 1317 | if (tt_local_entry) |
| 1318 | return true; |
| 1319 | return false; |
| 1320 | } |
| 1321 | |
| 1322 | void handle_tt_response(struct bat_priv *bat_priv, |
| 1323 | struct tt_query_packet *tt_response) |
| 1324 | { |
| 1325 | struct tt_req_node *node, *safe; |
| 1326 | struct orig_node *orig_node = NULL; |
| 1327 | |
| 1328 | bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for " |
| 1329 | "ttvn %d t_size: %d [%c]\n", |
| 1330 | tt_response->src, tt_response->ttvn, |
| 1331 | tt_response->tt_data, |
| 1332 | (tt_response->flags & TT_FULL_TABLE ? 'F' : '.')); |
| 1333 | |
| 1334 | orig_node = orig_hash_find(bat_priv, tt_response->src); |
| 1335 | if (!orig_node) |
| 1336 | goto out; |
| 1337 | |
| 1338 | if (tt_response->flags & TT_FULL_TABLE) |
| 1339 | tt_fill_gtable(bat_priv, tt_response); |
| 1340 | else |
| 1341 | tt_update_changes(bat_priv, orig_node, tt_response->tt_data, |
| 1342 | tt_response->ttvn, |
| 1343 | (struct tt_change *)(tt_response + 1)); |
| 1344 | |
| 1345 | /* Delete the tt_req_node from pending tt_requests list */ |
| 1346 | spin_lock_bh(&bat_priv->tt_req_list_lock); |
| 1347 | list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { |
| 1348 | if (!compare_eth(node->addr, tt_response->src)) |
| 1349 | continue; |
| 1350 | list_del(&node->list); |
| 1351 | kfree(node); |
| 1352 | } |
| 1353 | spin_unlock_bh(&bat_priv->tt_req_list_lock); |
| 1354 | |
| 1355 | /* Recalculate the CRC for this orig_node and store it */ |
| 1356 | spin_lock_bh(&bat_priv->tt_ghash_lock); |
| 1357 | orig_node->tt_crc = tt_global_crc(bat_priv, orig_node); |
| 1358 | spin_unlock_bh(&bat_priv->tt_ghash_lock); |
| 1359 | out: |
| 1360 | if (orig_node) |
| 1361 | orig_node_free_ref(orig_node); |
| 1362 | } |
| 1363 | |
/* Initialise the local and global translation tables and arm the periodic
 * purge timer. Returns 1 on success, 0 on failure. */
int tt_init(struct bat_priv *bat_priv)
{
	/* short-circuit keeps the original order: local first, then global */
	if (!tt_local_init(bat_priv) || !tt_global_init(bat_priv))
		return 0;

	tt_start_timer(bat_priv);
	return 1;
}
| 1376 | |
/* Tear down all translation-table state of this soft interface. The
 * periodic purge work is cancelled (and waited for) first so it cannot
 * race against the frees below. */
void tt_free(struct bat_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->tt_work);

	tt_local_table_free(bat_priv);
	tt_global_table_free(bat_priv);
	tt_req_list_free(bat_priv);
	tt_changes_list_free(bat_priv);

	/* buffered local changes; kfree(NULL) is a no-op */
	kfree(bat_priv->tt_buff);
}
| 1388 | |
| 1389 | static void tt_purge(struct work_struct *work) |
| 1390 | { |
| 1391 | struct delayed_work *delayed_work = |
| 1392 | container_of(work, struct delayed_work, work); |
| 1393 | struct bat_priv *bat_priv = |
| 1394 | container_of(delayed_work, struct bat_priv, tt_work); |
| 1395 | |
| 1396 | tt_local_purge(bat_priv); |
| 1397 | tt_req_purge(bat_priv); |
| 1398 | |
| 1399 | tt_start_timer(bat_priv); |
| 1400 | } |