/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/crc32c.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>
#include "main.h"
#include "sysfs.h"
#include "debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
#include "soft-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "vis.h"
#include "hash.h"
#include "bat_algo.h"
#include "network-coding.h"

/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;
static int (*batadv_rx_handler[256])(struct sk_buff *,
				     struct batadv_hard_iface *);
char batadv_routing_algo[20] = "BATMAN_IV";
static struct hlist_head batadv_algo_list;

unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);

static int __init batadv_init(void)
{
	INIT_LIST_HEAD(&batadv_hardif_list);
	INIT_HLIST_HEAD(&batadv_algo_list);

	batadv_recv_handler_init();

	batadv_iv_init();
	batadv_nc_init();

	batadv_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!batadv_event_workqueue)
		return -ENOMEM;

	batadv_socket_init();
	batadv_debugfs_init();

	register_netdevice_notifier(&batadv_hard_if_notifier);
	rtnl_link_register(&batadv_link_ops);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

	return 0;
}

static void __exit batadv_exit(void)
{
	batadv_debugfs_destroy();
	rtnl_link_unregister(&batadv_link_ops);
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	rcu_barrier();
}

int batadv_mesh_init(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
	spin_lock_init(&bat_priv->vis.hash_lock);
	spin_lock_init(&bat_priv->vis.list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw.list);
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_LIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
			    BATADV_NULL_IFINDEX);

	ret = batadv_vis_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_dat_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_nc_mesh_init(bat_priv);
	if (ret < 0)
		goto err;

	atomic_set(&bat_priv->gw.reselect, 0);
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

	return 0;

err:
	batadv_mesh_free(soft_iface);
	return ret;
}

void batadv_mesh_free(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_vis_quit(bat_priv);

	batadv_gw_node_purge(bat_priv);
	batadv_nc_mesh_free(bat_priv);
	batadv_dat_free(bat_priv);
	batadv_bla_free(bat_priv);

	/* Free the TT and the originator tables only after having terminated
	 * all the other components that depend on them and may still access
	 * these structures.
	 */
	batadv_tt_free(bat_priv);

	/* Since the originator table cleanup routine accesses the TT tables
	 * as well, it has to be invoked after the TT tables have been freed
	 * and marked as empty. This ensures that no cleanup RCU callbacks
	 * accessing the TT data are scheduled for later execution.
	 */
	batadv_originator_free(bat_priv);

	free_percpu(bat_priv->bat_counters);
	bat_priv->bat_counters = NULL;

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}

/**
 * batadv_is_my_mac - check if the given mac address belongs to any of the real
 * interfaces in the current mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address to check
 */
int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
{
	const struct batadv_hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
			rcu_read_unlock();
			return 1;
		}
	}
	rcu_read_unlock();
	return 0;
}
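
/* Typical use (a minimal sketch, not copied verbatim from this tree): the
 * routing code can call batadv_is_my_mac() to drop frames that originated
 * from one of our own hard interfaces and were echoed back to us:
 *
 *	if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
 *		return NET_RX_DROP;
 */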

/**
 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
 * function that requires the primary interface
 * @seq: debugfs table seq_file struct
 *
 * Returns primary interface if found or NULL otherwise.
 */
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (!primary_if) {
		seq_printf(seq,
			   "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
			   net_dev->name);
		goto out;
	}

	if (primary_if->if_status == BATADV_IF_ACTIVE)
		goto out;

	seq_printf(seq,
		   "BATMAN mesh %s disabled - primary interface not active\n",
		   net_dev->name);
	batadv_hardif_free_ref(primary_if);
	primary_if = NULL;

out:
	return primary_if;
}
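
/* Usage sketch (a hypothetical debugfs printer, modelled on the tables that
 * call this helper): the returned reference must be released with
 * batadv_hardif_free_ref() once printing is done:
 *
 *	static int batadv_foo_seq_print_text(struct seq_file *seq, void *off)
 *	{
 *		struct batadv_hard_iface *primary_if;
 *
 *		primary_if = batadv_seq_print_text_primary_if_get(seq);
 *		if (!primary_if)
 *			return 0;
 *
 *		seq_printf(seq, "using %s\n", primary_if->net_dev->name);
 *		batadv_hardif_free_ref(primary_if);
 *		return 0;
 *	}
 */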

/**
 * batadv_skb_set_priority - sets skb priority according to packet content
 * @skb: the packet to be sent
 * @offset: offset to the packet content
 *
 * This function sets a value between 256 and 263 (802.1d priority), which
 * can be interpreted by cfg80211 or other drivers.
 */
void batadv_skb_set_priority(struct sk_buff *skb, int offset)
{
	struct iphdr ip_hdr_tmp, *ip_hdr;
	struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
	struct ethhdr ethhdr_tmp, *ethhdr;
	struct vlan_ethhdr *vhdr, vhdr_tmp;
	u32 prio;

	/* already set, do nothing */
	if (skb->priority >= 256 && skb->priority <= 263)
		return;

	ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
	if (!ethhdr)
		return;

	switch (ethhdr->h_proto) {
	case htons(ETH_P_8021Q):
		vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
					  sizeof(*vhdr), &vhdr_tmp);
		if (!vhdr)
			return;
		prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
		prio = prio >> VLAN_PRIO_SHIFT;
		break;
	case htons(ETH_P_IP):
		ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					    sizeof(*ip_hdr), &ip_hdr_tmp);
		if (!ip_hdr)
			return;
		prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
		break;
	case htons(ETH_P_IPV6):
		ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					     sizeof(*ip6_hdr), &ip6_hdr_tmp);
		if (!ip6_hdr)
			return;
		prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
		break;
	default:
		return;
	}

	skb->priority = prio + 256;
}
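
/* Worked example of the mapping above: an IPv4 packet carrying DSCP EF
 * (TOS byte 0xb8) yields (0xb8 & 0xfc) >> 5 = 5, so skb->priority becomes
 * 256 + 5 = 261; a VLAN tag with PCP 6 yields 256 + 6 = 262. As noted in
 * the kernel-doc, drivers such as cfg80211 interpret values in the
 * 256..263 range as an 802.1d user priority.
 */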

static int batadv_recv_unhandled_packet(struct sk_buff *skb,
					struct batadv_hard_iface *recv_if)
{
	return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batadv_priv *bat_priv;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto err_free;

	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

	if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: incompatible batman version (%i)\n",
			   batadv_ogm_packet->header.version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batadv_ogm_packet->header.packet_type;
	ret = (*batadv_rx_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}
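
/* How this entry point is wired up (a sketch; the actual registration lives
 * in hard-interface.c when a hard interface is enabled): a packet_type for
 * ETH_P_BATMAN is added per hard interface, so the core network stack calls
 * batadv_batman_skb_recv() for every batman-adv frame seen on it:
 *
 *	hard_iface->batman_adv_ptype.type = htons(ETH_P_BATMAN);
 *	hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
 *	hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
 *	dev_add_pack(&hard_iface->batman_adv_ptype);
 */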

static void batadv_recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_packet;

	/* batman icmp packet */
	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
	/* unicast with 4 addresses packet */
	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
	/* unicast packet */
	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
	/* fragmented unicast packet */
	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
	/* broadcast packet */
	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
	/* vis packet */
	batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
	/* Translation table query (request or response) */
	batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
	/* Roaming advertisement */
	batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
}

int
batadv_recv_handler_register(uint8_t packet_type,
			     int (*recv_handler)(struct sk_buff *,
						 struct batadv_hard_iface *))
{
	if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
		return -EBUSY;

	batadv_rx_handler[packet_type] = recv_handler;
	return 0;
}

void batadv_recv_handler_unregister(uint8_t packet_type)
{
	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}
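
/* Registration sketch (the OGM handler in bat_iv_ogm.c is registered this
 * way; shown here under that assumption rather than copied verbatim):
 * a component claims a packet type by registering a handler for it and
 * gives it back on shutdown:
 *
 *	ret = batadv_recv_handler_register(BATADV_IV_OGM,
 *					   batadv_iv_ogm_receive);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	batadv_recv_handler_unregister(BATADV_IV_OGM);
 */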

static struct batadv_algo_ops *batadv_algo_get(char *name)
{
	struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;

	hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
		if (strcmp(bat_algo_ops_tmp->name, name) != 0)
			continue;

		bat_algo_ops = bat_algo_ops_tmp;
		break;
	}

	return bat_algo_ops;
}

int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
	struct batadv_algo_ops *bat_algo_ops_tmp;
	int ret;

	bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
	if (bat_algo_ops_tmp) {
		pr_info("Trying to register already registered routing algorithm: %s\n",
			bat_algo_ops->name);
		ret = -EEXIST;
		goto out;
	}

	/* all algorithms must implement all ops (for now) */
	if (!bat_algo_ops->bat_iface_enable ||
	    !bat_algo_ops->bat_iface_disable ||
	    !bat_algo_ops->bat_iface_update_mac ||
	    !bat_algo_ops->bat_primary_iface_set ||
	    !bat_algo_ops->bat_ogm_schedule ||
	    !bat_algo_ops->bat_ogm_emit) {
		pr_info("Routing algo '%s' does not implement required ops\n",
			bat_algo_ops->name);
		ret = -EINVAL;
		goto out;
	}

	INIT_HLIST_NODE(&bat_algo_ops->list);
	hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
	ret = 0;

out:
	return ret;
}
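
/* Registration sketch for a routing algorithm (modelled on the BATMAN_IV
 * ops in bat_iv_ogm.c; the handler names below follow that module and are
 * not defined here): every mandatory op checked above must be filled in,
 * otherwise batadv_algo_register() rejects the ops with -EINVAL:
 *
 *	static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
 *		.name = "BATMAN_IV",
 *		.bat_iface_enable = batadv_iv_ogm_iface_enable,
 *		.bat_iface_disable = batadv_iv_ogm_iface_disable,
 *		.bat_iface_update_mac = batadv_iv_ogm_iface_update_mac,
 *		.bat_primary_iface_set = batadv_iv_ogm_primary_iface_set,
 *		.bat_ogm_schedule = batadv_iv_ogm_schedule,
 *		.bat_ogm_emit = batadv_iv_ogm_emit,
 *	};
 *
 *	return batadv_algo_register(&batadv_batman_iv);
 */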

int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
{
	struct batadv_algo_ops *bat_algo_ops;
	int ret = -EINVAL;

	bat_algo_ops = batadv_algo_get(name);
	if (!bat_algo_ops)
		goto out;

	bat_priv->bat_algo_ops = bat_algo_ops;
	ret = 0;

out:
	return ret;
}

int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
	struct batadv_algo_ops *bat_algo_ops;

	seq_puts(seq, "Available routing algorithms:\n");

	hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
		seq_printf(seq, "%s\n", bat_algo_ops->name);
	}

	return 0;
}

/**
 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
 * the header
 * @skb: skb pointing to fragmented socket buffers
 * @payload_ptr: Pointer to position inside the head buffer of the skb
 * marking the start of the data to be CRC'ed
 *
 * payload_ptr must always point to an address in the skb head buffer and not
 * to a fragment.
 */
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
{
	u32 crc = 0;
	unsigned int from;
	unsigned int to = skb->len;
	struct skb_seq_state st;
	const u8 *data;
	unsigned int len;
	unsigned int consumed = 0;

	from = (unsigned int)(payload_ptr - skb->data);

	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		crc = crc32c(crc, data, len);
		consumed += len;
	}

	return htonl(crc);
}
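
/* Usage sketch (hypothetical caller; the packet type name is illustrative
 * only): checksum everything that follows a batman-adv header, regardless
 * of whether the payload sits in the head buffer or in paged fragments:
 *
 *	struct batadv_some_packet *packet = (void *)skb->data;
 *	__be32 crc;
 *
 *	crc = batadv_skb_crc32(skb, skb->data + sizeof(*packet));
 */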

static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
	struct batadv_algo_ops *bat_algo_ops;
	char *algo_name = (char *)val;
	size_t name_len = strlen(algo_name);

	if (name_len > 0 && algo_name[name_len - 1] == '\n')
		algo_name[name_len - 1] = '\0';

	bat_algo_ops = batadv_algo_get(algo_name);
	if (!bat_algo_ops) {
		pr_err("Routing algorithm '%s' is not supported\n", algo_name);
		return -EINVAL;
	}

	return param_set_copystring(algo_name, kp);
}

static const struct kernel_param_ops batadv_param_ops_ra = {
	.set = batadv_param_set_ra,
	.get = param_get_string,
};

static struct kparam_string batadv_param_string_ra = {
	.maxlen = sizeof(batadv_routing_algo),
	.string = batadv_routing_algo,
};

module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
		0644);
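
/* The parameter declared above selects the default routing algorithm for
 * newly created batman-adv interfaces. It can be set at module load time
 * or, thanks to the 0644 permissions, changed later through sysfs (a usage
 * sketch, assuming the standard module parameter paths):
 *
 *	# modprobe batman-adv routing_algo=BATMAN_IV
 *	# echo BATMAN_IV > /sys/module/batman_adv/parameters/routing_algo
 */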
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);