/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/crc32c.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>
#include "main.h"
#include "sysfs.h"
#include "debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
#include "soft-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "unicast.h"
#include "vis.h"
#include "hash.h"
#include "bat_algo.h"
#include "network-coding.h"


/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;
static int (*batadv_rx_handler[256])(struct sk_buff *,
				     struct batadv_hard_iface *);
char batadv_routing_algo[20] = "BATMAN_IV";
static struct hlist_head batadv_algo_list;

unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);

static int __init batadv_init(void)
{
	INIT_LIST_HEAD(&batadv_hardif_list);
	INIT_HLIST_HEAD(&batadv_algo_list);

	batadv_recv_handler_init();

	batadv_iv_init();
	batadv_nc_init();

	batadv_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!batadv_event_workqueue)
		return -ENOMEM;

	batadv_socket_init();
	batadv_debugfs_init();

	register_netdevice_notifier(&batadv_hard_if_notifier);
	rtnl_link_register(&batadv_link_ops);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

	return 0;
}

static void __exit batadv_exit(void)
{
	batadv_debugfs_destroy();
	rtnl_link_unregister(&batadv_link_ops);
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	rcu_barrier();
}

int batadv_mesh_init(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
	spin_lock_init(&bat_priv->vis.hash_lock);
	spin_lock_init(&bat_priv->vis.list_lock);
	spin_lock_init(&bat_priv->tvlv.container_list_lock);
	spin_lock_init(&bat_priv->tvlv.handler_list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw.list);
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_LIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);
	INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
	INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
			    BATADV_NULL_IFINDEX);

	ret = batadv_vis_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_dat_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_nc_mesh_init(bat_priv);
	if (ret < 0)
		goto err;

	atomic_set(&bat_priv->gw.reselect, 0);
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

	return 0;

err:
	batadv_mesh_free(soft_iface);
	return ret;
}

void batadv_mesh_free(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_vis_quit(bat_priv);

	batadv_gw_node_purge(bat_priv);
	batadv_nc_mesh_free(bat_priv);
	batadv_dat_free(bat_priv);
	batadv_bla_free(bat_priv);

	/* Free the TT and the originator tables only after having terminated
	 * all the other depending components which may use these structures for
	 * their purposes.
	 */
	batadv_tt_free(bat_priv);

	/* Since the originator table clean up routine is accessing the TT
	 * tables as well, it has to be invoked after the TT tables have been
	 * freed and marked as empty. This ensures that no cleanup RCU callbacks
	 * accessing the TT data are scheduled for later execution.
	 */
	batadv_originator_free(bat_priv);

	free_percpu(bat_priv->bat_counters);
	bat_priv->bat_counters = NULL;

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}

/**
 * batadv_is_my_mac - check if the given mac address belongs to any of the real
 * interfaces in the current mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address to check
 */
int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
{
	const struct batadv_hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
			rcu_read_unlock();
			return 1;
		}
	}
	rcu_read_unlock();
	return 0;
}

/**
 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
 * function that requires the primary interface
 * @seq: debugfs table seq_file struct
 *
 * Returns primary interface if found or NULL otherwise.
 */
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (!primary_if) {
		seq_printf(seq,
			   "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
			   net_dev->name);
		goto out;
	}

	if (primary_if->if_status == BATADV_IF_ACTIVE)
		goto out;

	seq_printf(seq,
		   "BATMAN mesh %s disabled - primary interface not active\n",
		   net_dev->name);
	batadv_hardif_free_ref(primary_if);
	primary_if = NULL;

out:
	return primary_if;
}

/**
 * batadv_skb_set_priority - sets skb priority according to packet content
 * @skb: the packet to be sent
 * @offset: offset to the packet content
 *
 * This function sets a value between 256 and 263 (802.1d priority), which
 * can be interpreted by the cfg80211 or other drivers.
 */
void batadv_skb_set_priority(struct sk_buff *skb, int offset)
{
	struct iphdr ip_hdr_tmp, *ip_hdr;
	struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
	struct ethhdr ethhdr_tmp, *ethhdr;
	struct vlan_ethhdr *vhdr, vhdr_tmp;
	u32 prio;

	/* already set, do nothing */
	if (skb->priority >= 256 && skb->priority <= 263)
		return;

	ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
	if (!ethhdr)
		return;

	switch (ethhdr->h_proto) {
	case htons(ETH_P_8021Q):
		vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
					  sizeof(*vhdr), &vhdr_tmp);
		if (!vhdr)
			return;
		prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
		prio = prio >> VLAN_PRIO_SHIFT;
		break;
	case htons(ETH_P_IP):
		ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					    sizeof(*ip_hdr), &ip_hdr_tmp);
		if (!ip_hdr)
			return;
		prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
		break;
	case htons(ETH_P_IPV6):
		ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					     sizeof(*ip6_hdr), &ip6_hdr_tmp);
		if (!ip6_hdr)
			return;
		prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
		break;
	default:
		return;
	}

	skb->priority = prio + 256;
}

static int batadv_recv_unhandled_packet(struct sk_buff *skb,
					struct batadv_hard_iface *recv_if)
{
	return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batadv_priv *bat_priv;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto err_free;

	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

	if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: incompatible batman version (%i)\n",
			   batadv_ogm_packet->header.version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batadv_ogm_packet->header.packet_type;
	ret = (*batadv_rx_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}

static void batadv_recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_packet;

	/* batman icmp packet */
	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
	/* unicast with 4 addresses packet */
	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
	/* unicast packet */
	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
	/* fragmented unicast packet */
	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
	/* broadcast packet */
	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
	/* vis packet */
	batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
	/* Translation table query (request or response) */
	batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
	/* Roaming advertisement */
	batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
	/* unicast tvlv packet */
	batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
}

int
batadv_recv_handler_register(uint8_t packet_type,
			     int (*recv_handler)(struct sk_buff *,
						 struct batadv_hard_iface *))
{
	if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
		return -EBUSY;

	batadv_rx_handler[packet_type] = recv_handler;
	return 0;
}

void batadv_recv_handler_unregister(uint8_t packet_type)
{
	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}
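
/* Illustrative sketch (not part of the original file): a protocol extension
 * built on top of this dispatcher would claim an unused packet type number at
 * init time and release it again on shutdown. The packet type value 0xfa and
 * the handler name below are hypothetical; only
 * batadv_recv_handler_register()/_unregister() are taken from this file.
 *
 *	static int batadv_example_recv_packet(struct sk_buff *skb,
 *					      struct batadv_hard_iface *recv_if)
 *	{
 *		return NET_RX_DROP;
 *	}
 *
 *	if (batadv_recv_handler_register(0xfa, batadv_example_recv_packet) < 0)
 *		pr_warn("packet type 0xfa already claimed\n");
 *
 *	batadv_recv_handler_unregister(0xfa);
 */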

static struct batadv_algo_ops *batadv_algo_get(char *name)
{
	struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;

	hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
		if (strcmp(bat_algo_ops_tmp->name, name) != 0)
			continue;

		bat_algo_ops = bat_algo_ops_tmp;
		break;
	}

	return bat_algo_ops;
}

int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
	struct batadv_algo_ops *bat_algo_ops_tmp;
	int ret;

	bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
	if (bat_algo_ops_tmp) {
		pr_info("Trying to register already registered routing algorithm: %s\n",
			bat_algo_ops->name);
		ret = -EEXIST;
		goto out;
	}

	/* all algorithms must implement all ops (for now) */
	if (!bat_algo_ops->bat_iface_enable ||
	    !bat_algo_ops->bat_iface_disable ||
	    !bat_algo_ops->bat_iface_update_mac ||
	    !bat_algo_ops->bat_primary_iface_set ||
	    !bat_algo_ops->bat_ogm_schedule ||
	    !bat_algo_ops->bat_ogm_emit) {
		pr_info("Routing algo '%s' does not implement required ops\n",
			bat_algo_ops->name);
		ret = -EINVAL;
		goto out;
	}

	INIT_HLIST_NODE(&bat_algo_ops->list);
	hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
	ret = 0;

out:
	return ret;
}
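
/* Illustrative sketch (not part of the original file): a routing algorithm
 * module would fill in the mandatory batadv_algo_ops callbacks checked above
 * and register them once at init time. The callback names are hypothetical;
 * the required fields are exactly the ones enforced by this function.
 *
 *	static struct batadv_algo_ops batadv_example_ops = {
 *		.name			= "BATMAN_EXAMPLE",
 *		.bat_iface_enable	= batadv_example_iface_enable,
 *		.bat_iface_disable	= batadv_example_iface_disable,
 *		.bat_iface_update_mac	= batadv_example_iface_update_mac,
 *		.bat_primary_iface_set	= batadv_example_primary_iface_set,
 *		.bat_ogm_schedule	= batadv_example_ogm_schedule,
 *		.bat_ogm_emit		= batadv_example_ogm_emit,
 *	};
 *
 *	ret = batadv_algo_register(&batadv_example_ops);
 */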

int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
{
	struct batadv_algo_ops *bat_algo_ops;
	int ret = -EINVAL;

	bat_algo_ops = batadv_algo_get(name);
	if (!bat_algo_ops)
		goto out;

	bat_priv->bat_algo_ops = bat_algo_ops;
	ret = 0;

out:
	return ret;
}

int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
	struct batadv_algo_ops *bat_algo_ops;

	seq_puts(seq, "Available routing algorithms:\n");

	hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
		seq_printf(seq, "%s\n", bat_algo_ops->name);
	}

	return 0;
}

/**
 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
 * the header
 * @skb: skb pointing to fragmented socket buffers
 * @payload_ptr: Pointer to position inside the head buffer of the skb
 * marking the start of the data to be CRC'ed
 *
 * payload_ptr must always point to an address in the skb head buffer and not to
 * a fragment.
 */
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
{
	u32 crc = 0;
	unsigned int from;
	unsigned int to = skb->len;
	struct skb_seq_state st;
	const u8 *data;
	unsigned int len;
	unsigned int consumed = 0;

	from = (unsigned int)(payload_ptr - skb->data);

	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		crc = crc32c(crc, data, len);
		consumed += len;
	}

	return htonl(crc);
}

/**
 * batadv_tvlv_handler_free_ref - decrement the tvlv handler refcounter and
 * possibly free it
 * @tvlv_handler: the tvlv handler to free
 */
static void
batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
{
	if (atomic_dec_and_test(&tvlv_handler->refcount))
		kfree_rcu(tvlv_handler, rcu);
}

/**
 * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
 * based on the provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to look for
 * @version: tvlv handler version to look for
 *
 * Returns tvlv handler if found or NULL otherwise.
 */
static struct batadv_tvlv_handler
*batadv_tvlv_handler_get(struct batadv_priv *bat_priv,
			 uint8_t type, uint8_t version)
{
	struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tvlv_handler_tmp,
				 &bat_priv->tvlv.handler_list, list) {
		if (tvlv_handler_tmp->type != type)
			continue;

		if (tvlv_handler_tmp->version != version)
			continue;

		if (!atomic_inc_not_zero(&tvlv_handler_tmp->refcount))
			continue;

		tvlv_handler = tvlv_handler_tmp;
		break;
	}
	rcu_read_unlock();

	return tvlv_handler;
}

/**
 * batadv_tvlv_container_free_ref - decrement the tvlv container refcounter and
 * possibly free it
 * @tvlv: the tvlv container to free
 */
static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
{
	if (atomic_dec_and_test(&tvlv->refcount))
		kfree(tvlv);
}

/**
 * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
 * list based on the provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type to look for
 * @version: tvlv container version to look for
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 *
 * Returns tvlv container if found or NULL otherwise.
 */
static struct batadv_tvlv_container
*batadv_tvlv_container_get(struct batadv_priv *bat_priv,
			   uint8_t type, uint8_t version)
{
	struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;

	hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
		if (tvlv_tmp->tvlv_hdr.type != type)
			continue;

		if (tvlv_tmp->tvlv_hdr.version != version)
			continue;

		if (!atomic_inc_not_zero(&tvlv_tmp->refcount))
			continue;

		tvlv = tvlv_tmp;
		break;
	}

	return tvlv;
}

/**
 * batadv_tvlv_container_list_size - calculate the size of the tvlv container
 * list entries
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 *
 * Returns size of all currently registered tvlv containers in bytes.
 */
static uint16_t batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
{
	struct batadv_tvlv_container *tvlv;
	uint16_t tvlv_len = 0;

	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
		tvlv_len += sizeof(struct batadv_tvlv_hdr);
		tvlv_len += ntohs(tvlv->tvlv_hdr.len);
	}

	return tvlv_len;
}

/**
 * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
 * list
 * @tvlv: the to be removed tvlv container
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 */
static void batadv_tvlv_container_remove(struct batadv_tvlv_container *tvlv)
{
	if (!tvlv)
		return;

	hlist_del(&tvlv->list);

	/* first call to decrement the counter, second call to free */
	batadv_tvlv_container_free_ref(tvlv);
	batadv_tvlv_container_free_ref(tvlv);
}

/**
 * batadv_tvlv_container_unregister - unregister tvlv container based on the
 * provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type to unregister
 * @version: tvlv container version to unregister
 */
void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
				      uint8_t type, uint8_t version)
{
	struct batadv_tvlv_container *tvlv;

	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv = batadv_tvlv_container_get(bat_priv, type, version);
	batadv_tvlv_container_remove(tvlv);
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}

/**
 * batadv_tvlv_container_register - register tvlv type, version and content
 * to be propagated with each (primary interface) OGM
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type
 * @version: tvlv container version
 * @tvlv_value: tvlv container content
 * @tvlv_value_len: tvlv container content length
 *
 * If a container of the same type and version was already registered the new
 * content is going to replace the old one.
 */
void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
				    uint8_t type, uint8_t version,
				    void *tvlv_value, uint16_t tvlv_value_len)
{
	struct batadv_tvlv_container *tvlv_old, *tvlv_new;

	if (!tvlv_value)
		tvlv_value_len = 0;

	tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
	if (!tvlv_new)
		return;

	tvlv_new->tvlv_hdr.version = version;
	tvlv_new->tvlv_hdr.type = type;
	tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);

	memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
	INIT_HLIST_NODE(&tvlv_new->list);
	atomic_set(&tvlv_new->refcount, 1);

	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
	batadv_tvlv_container_remove(tvlv_old);
	hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}
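
/* Illustrative sketch (not part of the original file): a feature that wants to
 * announce a small piece of state in every OGM registers it once as a
 * container; registering the same type/version again simply replaces the old
 * content. The type value 0x42, the version 1 and the payload are
 * hypothetical; the register/unregister calls are the ones defined here.
 *
 *	uint8_t example_flags = 0x01;
 *
 *	batadv_tvlv_container_register(bat_priv, 0x42, 1, &example_flags,
 *				       sizeof(example_flags));
 *
 *	batadv_tvlv_container_unregister(bat_priv, 0x42, 1);
 */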

/**
 * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
 * requested packet size
 * @packet_buff: packet buffer
 * @packet_buff_len: packet buffer size
 * @min_packet_len: requested packet minimum size
 * @additional_packet_len: requested additional packet size on top of minimum
 * size
 *
 * Returns true if the packet buffer could be changed to the requested size,
 * false otherwise.
 */
static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
					    int *packet_buff_len,
					    int min_packet_len,
					    int additional_packet_len)
{
	unsigned char *new_buff;

	new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, *packet_buff, min_packet_len);
		kfree(*packet_buff);
		*packet_buff = new_buff;
		*packet_buff_len = min_packet_len + additional_packet_len;
		return true;
	}

	return false;
}

/**
 * batadv_tvlv_container_ogm_append - append tvlv container content to given
 * OGM packet buffer
 * @bat_priv: the bat priv with all the soft interface information
 * @packet_buff: ogm packet buffer
 * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
 * content
 * @packet_min_len: ogm header size to be preserved for the OGM itself
 *
 * The ogm packet might be enlarged or shrunk depending on the current size
 * and the size of the to-be-appended tvlv containers.
 *
 * Returns size of all appended tvlv containers in bytes.
 */
uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
					  unsigned char **packet_buff,
					  int *packet_buff_len,
					  int packet_min_len)
{
	struct batadv_tvlv_container *tvlv;
	struct batadv_tvlv_hdr *tvlv_hdr;
	uint16_t tvlv_value_len;
	void *tvlv_value;
	bool ret;

	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);

	ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
					      packet_min_len, tvlv_value_len);

	if (!ret)
		goto end;

	if (!tvlv_value_len)
		goto end;

	tvlv_value = (*packet_buff) + packet_min_len;

	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
		tvlv_hdr = tvlv_value;
		tvlv_hdr->type = tvlv->tvlv_hdr.type;
		tvlv_hdr->version = tvlv->tvlv_hdr.version;
		tvlv_hdr->len = tvlv->tvlv_hdr.len;
		tvlv_value = tvlv_hdr + 1;
		memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
		tvlv_value = (uint8_t *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
	}

end:
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
	return tvlv_value_len;
}

/**
 * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
 * appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @tvlv_handler: tvlv callback function handling the tvlv content
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Returns success if handler was not found or the return value of the handler
 * callback.
 */
static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
				    struct batadv_tvlv_handler *tvlv_handler,
				    bool ogm_source,
				    struct batadv_orig_node *orig_node,
				    uint8_t *src, uint8_t *dst,
				    void *tvlv_value, uint16_t tvlv_value_len)
{
	if (!tvlv_handler)
		return NET_RX_SUCCESS;

	if (ogm_source) {
		if (!tvlv_handler->ogm_handler)
			return NET_RX_SUCCESS;

		if (!orig_node)
			return NET_RX_SUCCESS;

		tvlv_handler->ogm_handler(bat_priv, orig_node,
					  BATADV_NO_FLAGS,
					  tvlv_value, tvlv_value_len);
		tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
	} else {
		if (!src)
			return NET_RX_SUCCESS;

		if (!dst)
			return NET_RX_SUCCESS;

		if (!tvlv_handler->unicast_handler)
			return NET_RX_SUCCESS;

		return tvlv_handler->unicast_handler(bat_priv, src,
						     dst, tvlv_value,
						     tvlv_value_len);
	}

	return NET_RX_SUCCESS;
}

/**
 * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
 * appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Returns success when processing an OGM or the return value of all called
 * handler callbacks.
 */
int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
				   bool ogm_source,
				   struct batadv_orig_node *orig_node,
				   uint8_t *src, uint8_t *dst,
				   void *tvlv_value, uint16_t tvlv_value_len)
{
	struct batadv_tvlv_handler *tvlv_handler;
	struct batadv_tvlv_hdr *tvlv_hdr;
	uint16_t tvlv_value_cont_len;
	uint8_t cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
	int ret = NET_RX_SUCCESS;

	while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
		tvlv_hdr = tvlv_value;
		tvlv_value_cont_len = ntohs(tvlv_hdr->len);
		tvlv_value = tvlv_hdr + 1;
		tvlv_value_len -= sizeof(*tvlv_hdr);

		if (tvlv_value_cont_len > tvlv_value_len)
			break;

		tvlv_handler = batadv_tvlv_handler_get(bat_priv,
						       tvlv_hdr->type,
						       tvlv_hdr->version);

		ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
						ogm_source, orig_node,
						src, dst, tvlv_value,
						tvlv_value_cont_len);
		if (tvlv_handler)
			batadv_tvlv_handler_free_ref(tvlv_handler);
		tvlv_value = (uint8_t *)tvlv_value + tvlv_value_cont_len;
		tvlv_value_len -= tvlv_value_cont_len;
	}

	if (!ogm_source)
		return ret;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tvlv_handler,
				 &bat_priv->tvlv.handler_list, list) {
		if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
		    !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
			tvlv_handler->ogm_handler(bat_priv, orig_node,
						  cifnotfound, NULL, 0);

		tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
	}
	rcu_read_unlock();

	return NET_RX_SUCCESS;
}

/**
 * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
 * handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @batadv_ogm_packet: ogm packet containing the tvlv containers
 * @orig_node: orig node emitting the ogm packet
 */
void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
			     struct batadv_ogm_packet *batadv_ogm_packet,
			     struct batadv_orig_node *orig_node)
{
	void *tvlv_value;
	uint16_t tvlv_value_len;

	if (!batadv_ogm_packet)
		return;

	tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
	if (!tvlv_value_len)
		return;

	tvlv_value = batadv_ogm_packet + 1;

	batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
				       tvlv_value, tvlv_value_len);
}

/**
 * batadv_tvlv_handler_register - register tvlv handler based on the provided
 * type and version (both need to match) for ogm tvlv payload and/or unicast
 * payload
 * @bat_priv: the bat priv with all the soft interface information
 * @optr: ogm tvlv handler callback function. This function receives the orig
 * node, flags and the tvlv content as argument to process.
 * @uptr: unicast tvlv handler callback function. This function receives the
 * source & destination of the unicast packet as well as the tvlv content
 * to process.
 * @type: tvlv handler type to be registered
 * @version: tvlv handler version to be registered
 * @flags: flags to enable or disable TVLV API behavior
 */
void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
				  void (*optr)(struct batadv_priv *bat_priv,
					       struct batadv_orig_node *orig,
					       uint8_t flags,
					       void *tvlv_value,
					       uint16_t tvlv_value_len),
				  int (*uptr)(struct batadv_priv *bat_priv,
					      uint8_t *src, uint8_t *dst,
					      void *tvlv_value,
					      uint16_t tvlv_value_len),
				  uint8_t type, uint8_t version, uint8_t flags)
{
	struct batadv_tvlv_handler *tvlv_handler;

	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
	if (tvlv_handler) {
		batadv_tvlv_handler_free_ref(tvlv_handler);
		return;
	}

	tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
	if (!tvlv_handler)
		return;

	tvlv_handler->ogm_handler = optr;
	tvlv_handler->unicast_handler = uptr;
	tvlv_handler->type = type;
	tvlv_handler->version = version;
	tvlv_handler->flags = flags;
	atomic_set(&tvlv_handler->refcount, 1);
	INIT_HLIST_NODE(&tvlv_handler->list);

	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
	hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
}
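
/* Illustrative sketch (not part of the original file): a TVLV consumer
 * typically registers one handler per type/version pair, providing an OGM
 * callback, a unicast callback, or both. The callback name and the type value
 * 0x42 are hypothetical; the registration call and the
 * BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag are taken from this file.
 *
 *	static void batadv_example_tvlv_ogm(struct batadv_priv *bat_priv,
 *					    struct batadv_orig_node *orig,
 *					    uint8_t flags,
 *					    void *tvlv_value,
 *					    uint16_t tvlv_value_len)
 *	{
 *	}
 *
 *	batadv_tvlv_handler_register(bat_priv, batadv_example_tvlv_ogm, NULL,
 *				     0x42, 1,
 *				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
 */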

/**
 * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
 * provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to be unregistered
 * @version: tvlv handler version to be unregistered
 */
void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
				    uint8_t type, uint8_t version)
{
	struct batadv_tvlv_handler *tvlv_handler;

	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
	if (!tvlv_handler)
		return;

	batadv_tvlv_handler_free_ref(tvlv_handler);
	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
	hlist_del_rcu(&tvlv_handler->list);
	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
	batadv_tvlv_handler_free_ref(tvlv_handler);
}

/**
 * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
 * specified host
 * @bat_priv: the bat priv with all the soft interface information
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @type: tvlv type
 * @version: tvlv version
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 */
void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
			      uint8_t *dst, uint8_t type, uint8_t version,
			      void *tvlv_value, uint16_t tvlv_value_len)
{
	struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
	struct batadv_tvlv_hdr *tvlv_hdr;
	struct batadv_orig_node *orig_node;
	struct sk_buff *skb = NULL;
	unsigned char *tvlv_buff;
	unsigned int tvlv_len;
	ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
	bool ret = false;

	orig_node = batadv_orig_hash_find(bat_priv, dst);
	if (!orig_node)
		goto out;

	tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;

	skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
	if (!skb)
		goto out;

	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, ETH_HLEN);
	tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
	unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
	unicast_tvlv_packet->header.packet_type = BATADV_UNICAST_TVLV;
	unicast_tvlv_packet->header.version = BATADV_COMPAT_VERSION;
	unicast_tvlv_packet->header.ttl = BATADV_TTL;
	unicast_tvlv_packet->reserved = 0;
	unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
	unicast_tvlv_packet->align = 0;
	memcpy(unicast_tvlv_packet->src, src, ETH_ALEN);
	memcpy(unicast_tvlv_packet->dst, dst, ETH_ALEN);

	tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
	tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
	tvlv_hdr->version = version;
	tvlv_hdr->type = type;
	tvlv_hdr->len = htons(tvlv_value_len);
	tvlv_buff += sizeof(*tvlv_hdr);
	memcpy(tvlv_buff, tvlv_value, tvlv_value_len);

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = true;

out:
	if (skb && !ret)
		kfree_skb(skb);
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
}
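
/* Illustrative sketch (not part of the original file): the same kind of TVLV
 * data can be pushed directly to a single originator instead of being carried
 * in OGMs. The mac address variables and the type value 0x42 are placeholders;
 * the call signature is the one implemented above.
 *
 *	uint8_t example_data = 0x01;
 *
 *	batadv_tvlv_unicast_send(bat_priv, my_mac_addr, dst_mac_addr, 0x42, 1,
 *				 &example_data, sizeof(example_data));
 */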

static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
	struct batadv_algo_ops *bat_algo_ops;
	char *algo_name = (char *)val;
	size_t name_len = strlen(algo_name);

	if (name_len > 0 && algo_name[name_len - 1] == '\n')
		algo_name[name_len - 1] = '\0';

	bat_algo_ops = batadv_algo_get(algo_name);
	if (!bat_algo_ops) {
		pr_err("Routing algorithm '%s' is not supported\n", algo_name);
		return -EINVAL;
	}

	return param_set_copystring(algo_name, kp);
}

static const struct kernel_param_ops batadv_param_ops_ra = {
	.set = batadv_param_set_ra,
	.get = param_get_string,
};

static struct kparam_string batadv_param_string_ra = {
	.maxlen = sizeof(batadv_routing_algo),
	.string = batadv_routing_algo,
};

module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
		0644);
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);