/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "bat_sysfs.h"
#include "bat_debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
#include "soft-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "vis.h"
#include "hash.h"
#include "bat_algo.h"

/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked */
struct list_head hardif_list;
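/* dispatch table, indexed by batman-adv packet type, mapping each incoming
 * packet type to its receive handler (filled in by recv_handler_init())
 */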
static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *);
char bat_routing_algo[20] = "BATMAN_IV";
static struct hlist_head bat_algo_list;

unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

struct workqueue_struct *bat_event_workqueue;

static void recv_handler_init(void);

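/* module init: set up the global lists, the receive handler table, the
 * default B.A.T.M.A.N. IV algorithm, the event workqueue and the netdev
 * notifier
 */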
static int __init batman_init(void)
{
	INIT_LIST_HEAD(&hardif_list);
	INIT_HLIST_HEAD(&bat_algo_list);

	recv_handler_init();

	batadv_iv_init();

	/* the name should not be longer than 10 chars - see
	 * http://lwn.net/Articles/23634/ */
	bat_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!bat_event_workqueue)
		return -ENOMEM;

	bat_socket_init();
	batadv_debugfs_init();

	register_netdevice_notifier(&hard_if_notifier);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		SOURCE_VERSION, COMPAT_VERSION);

	return 0;
}

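/* module exit: tear down debugfs, the netdev notifier, all hard interfaces
 * and the event workqueue; rcu_barrier() waits for outstanding RCU callbacks
 * to finish before the module text goes away
 */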
static void __exit batman_exit(void)
{
	batadv_debugfs_destroy();
	unregister_netdevice_notifier(&hard_if_notifier);
	hardif_remove_interfaces();

	flush_workqueue(bat_event_workqueue);
	destroy_workqueue(bat_event_workqueue);
	bat_event_workqueue = NULL;

	rcu_barrier();
}

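/* bring up the mesh on a soft interface: initialize the per-mesh locks and
 * lists, the originator table, the translation table, vis and the bridge
 * loop avoidance, then mark the mesh as active
 */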
int mesh_init(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt_changes_list_lock);
	spin_lock_init(&bat_priv->tt_req_list_lock);
	spin_lock_init(&bat_priv->tt_roam_list_lock);
	spin_lock_init(&bat_priv->tt_buff_lock);
	spin_lock_init(&bat_priv->gw_list_lock);
	spin_lock_init(&bat_priv->vis_hash_lock);
	spin_lock_init(&bat_priv->vis_list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw_list);
	INIT_LIST_HEAD(&bat_priv->tt_changes_list);
	INIT_LIST_HEAD(&bat_priv->tt_req_list);
	INIT_LIST_HEAD(&bat_priv->tt_roam_list);

	ret = originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = tt_init(bat_priv);
	if (ret < 0)
		goto err;

	tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX);

	ret = vis_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	atomic_set(&bat_priv->gw_reselect, 0);
	atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);

	return 0;

err:
	mesh_free(soft_iface);
	return ret;
}

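/* shut down the mesh on a soft interface: the counterpart to mesh_init(),
 * purging outstanding packets and freeing all per-mesh state
 */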
void mesh_free(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING);

	purge_outstanding_packets(bat_priv, NULL);

	vis_quit(bat_priv);

	gw_node_purge(bat_priv);
	originator_free(bat_priv);

	tt_free(bat_priv);

	batadv_bla_free(bat_priv);

	free_percpu(bat_priv->bat_counters);

	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
}

void inc_module_count(void)
{
	try_module_get(THIS_MODULE);
}

void dec_module_count(void)
{
	module_put(THIS_MODULE);
}

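/* check whether the given address matches any of our active hard interfaces;
 * returns 1 if it is one of our own MAC addresses, 0 otherwise
 */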
int is_my_mac(const uint8_t *addr)
{
	const struct hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->if_status != IF_ACTIVE)
			continue;

		if (compare_eth(hard_iface->net_dev->dev_addr, addr)) {
			rcu_read_unlock();
			return 1;
		}
	}
	rcu_read_unlock();
	return 0;
}

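/* default entry in the receive handler table: drop packets of any type
 * nobody registered a handler for
 */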
static int recv_unhandled_packet(struct sk_buff *skb,
				 struct hard_iface *recv_if)
{
	return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
		    struct packet_type *ptype, struct net_device *orig_dev)
{
	struct bat_priv *bat_priv;
	struct batman_ogm_packet *batman_ogm_packet;
	struct hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != IF_ACTIVE)
		goto err_free;

	batman_ogm_packet = (struct batman_ogm_packet *)skb->data;

	if (batman_ogm_packet->header.version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_ogm_packet->header.version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batman_ogm_packet->header.packet_type;
	ret = (*recv_packet_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}

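/* populate the receive handler table: every entry defaults to
 * recv_unhandled_packet(), then the handlers for the known packet types are
 * filled in
 */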
static void recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++)
		recv_packet_handler[i] = recv_unhandled_packet;

	/* batman icmp packet */
	recv_packet_handler[BAT_ICMP] = recv_icmp_packet;
	/* unicast packet */
	recv_packet_handler[BAT_UNICAST] = recv_unicast_packet;
	/* fragmented unicast packet */
	recv_packet_handler[BAT_UNICAST_FRAG] = recv_ucast_frag_packet;
	/* broadcast packet */
	recv_packet_handler[BAT_BCAST] = recv_bcast_packet;
	/* vis packet */
	recv_packet_handler[BAT_VIS] = recv_vis_packet;
	/* Translation table query (request or response) */
	recv_packet_handler[BAT_TT_QUERY] = recv_tt_query;
	/* Roaming advertisement */
	recv_packet_handler[BAT_ROAM_ADV] = recv_roam_adv;
}

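/* claim a packet type for a receive handler; fails with -EBUSY if another
 * handler already owns that type. A routing algorithm would typically call
 * this from its init code, roughly like the following sketch (packet type
 * and handler names here are hypothetical, not part of this file):
 *
 *	ret = recv_handler_register(BAT_MY_TYPE, my_recv_packet);
 */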
int recv_handler_register(uint8_t packet_type,
			  int (*recv_handler)(struct sk_buff *,
					      struct hard_iface *))
{
	if (recv_packet_handler[packet_type] != &recv_unhandled_packet)
		return -EBUSY;

	recv_packet_handler[packet_type] = recv_handler;
	return 0;
}

void recv_handler_unregister(uint8_t packet_type)
{
	recv_packet_handler[packet_type] = recv_unhandled_packet;
}

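/* look up a registered routing algorithm by name; returns NULL if no
 * algorithm with that name has been registered
 */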
static struct bat_algo_ops *bat_algo_get(char *name)
{
	struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
	struct hlist_node *node;

	hlist_for_each_entry(bat_algo_ops_tmp, node, &bat_algo_list, list) {
		if (strcmp(bat_algo_ops_tmp->name, name) != 0)
			continue;

		bat_algo_ops = bat_algo_ops_tmp;
		break;
	}

	return bat_algo_ops;
}

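/* register a routing algorithm with the batman-adv core. The caller provides
 * a bat_algo_ops with all mandatory callbacks set; a minimal sketch of how an
 * algorithm module might do this (the names below are illustrative only):
 *
 *	static struct bat_algo_ops my_algo_ops = {
 *		.name			= "MY_ALGO",
 *		.bat_iface_enable	= my_iface_enable,
 *		.bat_iface_disable	= my_iface_disable,
 *		.bat_iface_update_mac	= my_iface_update_mac,
 *		.bat_primary_iface_set	= my_primary_iface_set,
 *		.bat_ogm_schedule	= my_ogm_schedule,
 *		.bat_ogm_emit		= my_ogm_emit,
 *	};
 *
 *	ret = bat_algo_register(&my_algo_ops);
 */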
int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
{
	struct bat_algo_ops *bat_algo_ops_tmp;
	int ret;

	bat_algo_ops_tmp = bat_algo_get(bat_algo_ops->name);
	if (bat_algo_ops_tmp) {
		pr_info("Trying to register already registered routing algorithm: %s\n",
			bat_algo_ops->name);
		ret = -EEXIST;
		goto out;
	}

	/* all algorithms must implement all ops (for now) */
	if (!bat_algo_ops->bat_iface_enable ||
	    !bat_algo_ops->bat_iface_disable ||
	    !bat_algo_ops->bat_iface_update_mac ||
	    !bat_algo_ops->bat_primary_iface_set ||
	    !bat_algo_ops->bat_ogm_schedule ||
	    !bat_algo_ops->bat_ogm_emit) {
		pr_info("Routing algo '%s' does not implement required ops\n",
			bat_algo_ops->name);
		ret = -EINVAL;
		goto out;
	}

	INIT_HLIST_NODE(&bat_algo_ops->list);
	hlist_add_head(&bat_algo_ops->list, &bat_algo_list);
	ret = 0;

out:
	return ret;
}

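/* select the routing algorithm used by a mesh instance; returns -EINVAL if
 * the requested algorithm has not been registered
 */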
int bat_algo_select(struct bat_priv *bat_priv, char *name)
{
	struct bat_algo_ops *bat_algo_ops;
	int ret = -EINVAL;

	bat_algo_ops = bat_algo_get(name);
	if (!bat_algo_ops)
		goto out;

	bat_priv->bat_algo_ops = bat_algo_ops;
	ret = 0;

out:
	return ret;
}

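/* seq_file helper listing all registered routing algorithms */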
int bat_algo_seq_print_text(struct seq_file *seq, void *offset)
{
	struct bat_algo_ops *bat_algo_ops;
	struct hlist_node *node;

	seq_printf(seq, "Available routing algorithms:\n");

	hlist_for_each_entry(bat_algo_ops, node, &bat_algo_list, list) {
		seq_printf(seq, "%s\n", bat_algo_ops->name);
	}

	return 0;
}

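/* setter for the "routing_algo" module parameter: strips a trailing newline
 * and only accepts names of registered algorithms, so the default algorithm
 * for newly created meshes can be chosen at load time, e.g.:
 *
 *	modprobe batman_adv routing_algo=BATMAN_IV
 */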
static int param_set_ra(const char *val, const struct kernel_param *kp)
{
	struct bat_algo_ops *bat_algo_ops;
	char *algo_name = (char *)val;
	size_t name_len = strlen(algo_name);

	/* guard against an empty string before stripping the newline */
	if (name_len > 0 && algo_name[name_len - 1] == '\n')
		algo_name[name_len - 1] = '\0';

	bat_algo_ops = bat_algo_get(algo_name);
	if (!bat_algo_ops) {
		pr_err("Routing algorithm '%s' is not supported\n", algo_name);
		return -EINVAL;
	}

	return param_set_copystring(algo_name, kp);
}

static const struct kernel_param_ops param_ops_ra = {
	.set = param_set_ra,
	.get = param_get_string,
};

static struct kparam_string __param_string_ra = {
	.maxlen = sizeof(bat_routing_algo),
	.string = bat_routing_algo,
};

module_param_cb(routing_algo, &param_ops_ra, &__param_string_ra, 0644);
module_init(batman_init);
module_exit(batman_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE);
MODULE_VERSION(SOURCE_VERSION);