blob: d277ba724a86336ccdf29362ceabf6ef75220281 [file] [log] [blame]
Sven Eckelmann9f6446c2015-04-23 13:16:35 +02001/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00002 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
Antonio Quartulliebf38fb2013-11-03 20:40:48 +010015 * along with this program; if not, see <http://www.gnu.org/licenses/>.
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000016 */
17
18#include "main.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020019
20#include <linux/atomic.h>
21#include <linux/bug.h>
22#include <linux/byteorder/generic.h>
23#include <linux/crc32c.h>
24#include <linux/errno.h>
25#include <linux/fs.h>
26#include <linux/if_ether.h>
27#include <linux/if_vlan.h>
28#include <linux/init.h>
29#include <linux/ip.h>
30#include <linux/ipv6.h>
31#include <linux/kernel.h>
32#include <linux/list.h>
33#include <linux/module.h>
34#include <linux/moduleparam.h>
35#include <linux/netdevice.h>
36#include <linux/pkt_sched.h>
37#include <linux/rculist.h>
38#include <linux/rcupdate.h>
39#include <linux/seq_file.h>
40#include <linux/skbuff.h>
41#include <linux/slab.h>
42#include <linux/spinlock.h>
43#include <linux/stddef.h>
44#include <linux/string.h>
45#include <linux/workqueue.h>
46#include <net/dsfield.h>
47#include <net/rtnetlink.h>
48
49#include "bat_algo.h"
50#include "bridge_loop_avoidance.h"
Sven Eckelmannb706b132012-06-10 23:58:51 +020051#include "debugfs.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020052#include "distributed-arp-table.h"
53#include "gateway_client.h"
54#include "gateway_common.h"
55#include "hard-interface.h"
56#include "icmp_socket.h"
57#include "multicast.h"
58#include "network-coding.h"
59#include "originator.h"
60#include "packet.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000061#include "routing.h"
62#include "send.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000063#include "soft-interface.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000064#include "translation-table.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000065
/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;

/* per-packet-type receive handlers, indexed by the packet type byte of the
 * batman-adv header; unused slots point at the "unhandled" stubs (see
 * batadv_recv_handler_init())
 */
static int (*batadv_rx_handler[256])(struct sk_buff *,
				     struct batadv_hard_iface *);

/* name of the selected routing algorithm (module parameter backing store) */
char batadv_routing_algo[20] = "BATMAN_IV";

/* list of all registered routing algorithms (struct batadv_algo_ops) */
static struct hlist_head batadv_algo_list;

/* ethernet broadcast address used as destination of broadcast packets */
unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/* single-threaded workqueue processing all batman-adv periodic work */
struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);
Marek Lindnerffa995e2012-03-01 15:35:17 +080080
/**
 * batadv_init - module init: set up global state and hook into the
 * networking core
 *
 * Initializes the global lists, fills the rx handler table, registers the
 * default routing algorithm, creates the event workqueue and registers the
 * netdevice notifier and rtnl link ops.
 *
 * Returns 0 on success or -ENOMEM if the event workqueue cannot be created.
 */
static int __init batadv_init(void)
{
	INIT_LIST_HEAD(&batadv_hardif_list);
	INIT_HLIST_HEAD(&batadv_algo_list);

	/* fill the rx handler table before any packet can be received */
	batadv_recv_handler_init();

	batadv_iv_init();
	batadv_nc_init();

	batadv_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!batadv_event_workqueue)
		return -ENOMEM;

	batadv_socket_init();
	batadv_debugfs_init();

	register_netdevice_notifier(&batadv_hard_if_notifier);
	rtnl_link_register(&batadv_link_ops);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

	return 0;
}
107
/**
 * batadv_exit - module exit: tear down in reverse order of batadv_init()
 */
static void __exit batadv_exit(void)
{
	batadv_debugfs_destroy();
	rtnl_link_unregister(&batadv_link_ops);
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	/* drain all queued work before destroying the workqueue */
	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	/* wait for all pending RCU callbacks (deferred frees) to complete */
	rcu_barrier();
}
121
/**
 * batadv_mesh_init - initialize all per-mesh state of a soft interface
 * @soft_iface: netdev struct of the soft interface
 *
 * Sets up every lock and list embedded in the private data, then brings up
 * the depending subsystems (originator table, translation table, bridge
 * loop avoidance, DAT, network coding, gateway, multicast) in order, and
 * finally marks the mesh as active.
 *
 * Returns 0 on success or a negative error code otherwise; on failure the
 * partially initialized mesh is torn down again via batadv_mesh_free().
 */
int batadv_mesh_init(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->tt.commit_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
#ifdef CONFIG_BATMAN_ADV_MCAST
	spin_lock_init(&bat_priv->mcast.want_lists_lock);
#endif
	spin_lock_init(&bat_priv->tvlv.container_list_lock);
	spin_lock_init(&bat_priv->tvlv.handler_list_lock);
	spin_lock_init(&bat_priv->softif_vlan_list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw.list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_unsnoopables_list);
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv4_list);
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
#endif
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_HLIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
#endif
	INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
	INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
	INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_dat_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_nc_mesh_init(bat_priv);
	if (ret < 0)
		goto err;

	batadv_gw_init(bat_priv);
	batadv_mcast_init(bat_priv);

	atomic_set(&bat_priv->gw.reselect, 0);
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

	return 0;

err:
	/* undo everything that was initialized so far */
	batadv_mesh_free(soft_iface);
	return ret;
}
192
/**
 * batadv_mesh_free - tear down all per-mesh state of a soft interface
 * @soft_iface: netdev struct of the soft interface
 *
 * Counterpart of batadv_mesh_init(); also used as its error path cleanup,
 * so every called free/purge routine must tolerate partial initialization.
 */
void batadv_mesh_free(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	/* stop accepting new work before dismantling the components */
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_gw_node_purge(bat_priv);
	batadv_nc_mesh_free(bat_priv);
	batadv_dat_free(bat_priv);
	batadv_bla_free(bat_priv);

	batadv_mcast_free(bat_priv);

	/* Free the TT and the originator tables only after having terminated
	 * all the other depending components which may use these structures for
	 * their purposes.
	 */
	batadv_tt_free(bat_priv);

	/* Since the originator table clean up routine is accessing the TT
	 * tables as well, it has to be invoked after the TT tables have been
	 * freed and marked as empty. This ensures that no cleanup RCU callbacks
	 * accessing the TT data are scheduled for later execution.
	 */
	batadv_originator_free(bat_priv);

	batadv_gw_free(bat_priv);

	free_percpu(bat_priv->bat_counters);
	bat_priv->bat_counters = NULL;

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}
228
David S. Miller6e0895c2013-04-22 20:32:51 -0400229/**
230 * batadv_is_my_mac - check if the given mac address belongs to any of the real
231 * interfaces in the current mesh
232 * @bat_priv: the bat priv with all the soft interface information
233 * @addr: the address to check
Markus Pargmanne8ad3b12014-12-26 12:41:38 +0100234 *
235 * Returns 'true' if the mac address was found, false otherwise.
David S. Miller6e0895c2013-04-22 20:32:51 -0400236 */
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200237bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000238{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200239 const struct batadv_hard_iface *hard_iface;
Markus Pargmanne8ad3b12014-12-26 12:41:38 +0100240 bool is_my_mac = false;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000241
242 rcu_read_lock();
Sven Eckelmann3193e8f2012-05-12 02:09:42 +0200243 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
Sven Eckelmanne9a4f292012-06-03 22:19:19 +0200244 if (hard_iface->if_status != BATADV_IF_ACTIVE)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000245 continue;
246
Antonio Quartullife8a93b2013-04-03 19:10:26 +0200247 if (hard_iface->soft_iface != bat_priv->soft_iface)
248 continue;
249
Sven Eckelmann1eda58b2012-05-12 13:48:58 +0200250 if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
Markus Pargmanne8ad3b12014-12-26 12:41:38 +0100251 is_my_mac = true;
252 break;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000253 }
254 }
255 rcu_read_unlock();
Markus Pargmanne8ad3b12014-12-26 12:41:38 +0100256 return is_my_mac;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000257}
258
Marek Lindner30da63a2012-08-03 17:15:46 +0200259/**
260 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
261 * function that requires the primary interface
262 * @seq: debugfs table seq_file struct
263 *
264 * Returns primary interface if found or NULL otherwise.
265 */
266struct batadv_hard_iface *
267batadv_seq_print_text_primary_if_get(struct seq_file *seq)
268{
269 struct net_device *net_dev = (struct net_device *)seq->private;
270 struct batadv_priv *bat_priv = netdev_priv(net_dev);
271 struct batadv_hard_iface *primary_if;
272
273 primary_if = batadv_primary_if_get_selected(bat_priv);
274
275 if (!primary_if) {
276 seq_printf(seq,
277 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
278 net_dev->name);
279 goto out;
280 }
281
282 if (primary_if->if_status == BATADV_IF_ACTIVE)
283 goto out;
284
285 seq_printf(seq,
286 "BATMAN mesh %s disabled - primary interface not active\n",
287 net_dev->name);
288 batadv_hardif_free_ref(primary_if);
289 primary_if = NULL;
290
291out:
292 return primary_if;
293}
294
Simon Wunderlichc54f38c92013-07-29 17:56:44 +0200295/**
Marek Lindner411d6ed2013-05-08 13:31:59 +0800296 * batadv_max_header_len - calculate maximum encapsulation overhead for a
297 * payload packet
298 *
299 * Return the maximum encapsulation overhead in bytes.
300 */
301int batadv_max_header_len(void)
302{
303 int header_len = 0;
304
305 header_len = max_t(int, header_len,
306 sizeof(struct batadv_unicast_packet));
307 header_len = max_t(int, header_len,
308 sizeof(struct batadv_unicast_4addr_packet));
309 header_len = max_t(int, header_len,
310 sizeof(struct batadv_bcast_packet));
311
312#ifdef CONFIG_BATMAN_ADV_NC
313 header_len = max_t(int, header_len,
314 sizeof(struct batadv_coded_packet));
315#endif
316
Marek Lindner1df0cbd2014-01-15 20:31:18 +0800317 return header_len + ETH_HLEN;
Marek Lindner411d6ed2013-05-08 13:31:59 +0800318}
319
/**
 * batadv_skb_set_priority - sets skb priority according to packet content
 * @skb: the packet to be sent
 * @offset: offset to the packet content
 *
 * This function sets a value between 256 and 263 (802.1d priority), which
 * can be interpreted by the cfg80211 or other drivers.
 */
void batadv_skb_set_priority(struct sk_buff *skb, int offset)
{
	struct iphdr ip_hdr_tmp, *ip_hdr;
	struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
	struct ethhdr ethhdr_tmp, *ethhdr;
	struct vlan_ethhdr *vhdr, vhdr_tmp;
	u32 prio;

	/* already set, do nothing */
	if (skb->priority >= 256 && skb->priority <= 263)
		return;

	/* the *_tmp on-stack copies are only used when the requested header
	 * is not contiguous in the skb head (skb_header_pointer semantics)
	 */
	ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
	if (!ethhdr)
		return;

	switch (ethhdr->h_proto) {
	case htons(ETH_P_8021Q):
		/* take the 802.1p bits from the VLAN TCI.
		 * NOTE(review): the header is read at offset + sizeof(*vhdr)
		 * rather than at offset - verify the offset math against the
		 * callers' encapsulation layout.
		 */
		vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
					  sizeof(*vhdr), &vhdr_tmp);
		if (!vhdr)
			return;
		prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
		prio = prio >> VLAN_PRIO_SHIFT;
		break;
	case htons(ETH_P_IP):
		ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					    sizeof(*ip_hdr), &ip_hdr_tmp);
		if (!ip_hdr)
			return;
		/* map the top three DSCP bits to a 0-7 priority */
		prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
		break;
	case htons(ETH_P_IPV6):
		ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					    sizeof(*ip6_hdr), &ip6_hdr_tmp);
		if (!ip6_hdr)
			return;
		prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
		break;
	default:
		return;
	}

	skb->priority = prio + 256;
}
373
/**
 * batadv_recv_unhandled_packet - default rx handler for packet types without
 * a registered handler
 * @skb: the received packet
 * @recv_if: interface that the packet was received on
 *
 * Always returns NET_RX_DROP so the dispatcher frees the skb.
 */
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
					struct batadv_hard_iface *recv_if)
{
	return NET_RX_DROP;
}
379
/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
/**
 * batadv_batman_skb_recv - packet_type receive hook for the batman-adv
 * ethertype
 * @skb: the received packet
 * @dev: net device the packet was received on
 * @ptype: packet_type this hook was registered with; used to recover the
 *  receiving hard interface via container_of()
 * @orig_dev: the original receiving net device
 *
 * Validates the frame (length, mac header, mesh/interface state,
 * compatibility version) and dispatches it to the rx handler matching its
 * batman-adv packet type.
 *
 * Returns NET_RX_SUCCESS when the packet was consumed (even if dropped for
 * routing-logical reasons) or NET_RX_DROP on validation failures.
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batadv_priv *bat_priv;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *hard_iface;
	u8 idx;
	int ret;

	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);
	/* get a private copy if the skb is shared with someone else */
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto err_free;

	/* version/type live in the common header prefix shared by all
	 * batman-adv packet formats
	 */
	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

	if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: incompatible batman version (%i)\n",
			   batadv_ogm_packet->version);
		goto err_free;
	}

	/* reset control block to avoid left overs from previous users */
	memset(skb->cb, 0, sizeof(struct batadv_skb_cb));

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batadv_ogm_packet->packet_type;
	ret = (*batadv_rx_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}
453
/**
 * batadv_recv_handler_init - populate the rx handler dispatch table
 *
 * Every slot first points at batadv_recv_unhandled_packet (the reserved
 * unicast range at its unicast variant), then the handlers for the known
 * packet types are installed. Also performs compile time checks of the
 * on-wire packet struct sizes.
 */
static void batadv_recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_packet;

	for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;

	/* compile time checks for sizes */
	BUILD_BUG_ON(sizeof(struct batadv_bla_claim_dst) != 6);
	BUILD_BUG_ON(sizeof(struct batadv_ogm_packet) != 24);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_header) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet_rr) != 116);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_packet) != 10);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_4addr_packet) != 18);
	BUILD_BUG_ON(sizeof(struct batadv_frag_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_bcast_packet) != 14);
	BUILD_BUG_ON(sizeof(struct batadv_coded_packet) != 46);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_tvlv_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_hdr) != 4);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_gateway_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_vlan_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);

	/* broadcast packet */
	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;

	/* unicast packets ... */
	/* unicast with 4 addresses packet */
	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
	/* unicast packet */
	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
	/* unicast tvlv packet */
	batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
	/* batman icmp packet */
	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
	/* Fragmented packets */
	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
}
497
Sven Eckelmann56303d32012-06-05 22:31:31 +0200498int
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200499batadv_recv_handler_register(u8 packet_type,
Sven Eckelmann56303d32012-06-05 22:31:31 +0200500 int (*recv_handler)(struct sk_buff *,
501 struct batadv_hard_iface *))
Marek Lindnerffa995e2012-03-01 15:35:17 +0800502{
Simon Wunderlicha1f1ac52013-04-25 10:37:23 +0200503 int (*curr)(struct sk_buff *,
504 struct batadv_hard_iface *);
505 curr = batadv_rx_handler[packet_type];
506
507 if ((curr != batadv_recv_unhandled_packet) &&
508 (curr != batadv_recv_unhandled_unicast_packet))
Marek Lindnerffa995e2012-03-01 15:35:17 +0800509 return -EBUSY;
510
Sven Eckelmannee11ad62012-05-16 20:23:19 +0200511 batadv_rx_handler[packet_type] = recv_handler;
Marek Lindnerffa995e2012-03-01 15:35:17 +0800512 return 0;
513}
514
/**
 * batadv_recv_handler_unregister - reset the rx handler for a packet type
 * back to the unhandled default
 * @packet_type: batadv packet type to unregister the handler for
 */
void batadv_recv_handler_unregister(u8 packet_type)
{
	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}
519
Sven Eckelmann56303d32012-06-05 22:31:31 +0200520static struct batadv_algo_ops *batadv_algo_get(char *name)
Marek Lindner1c280472011-11-28 17:40:17 +0800521{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200522 struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
Marek Lindner1c280472011-11-28 17:40:17 +0800523
Sasha Levinb67bfe02013-02-27 17:06:00 -0800524 hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
Marek Lindner1c280472011-11-28 17:40:17 +0800525 if (strcmp(bat_algo_ops_tmp->name, name) != 0)
526 continue;
527
528 bat_algo_ops = bat_algo_ops_tmp;
529 break;
530 }
531
532 return bat_algo_ops;
533}
534
Sven Eckelmann56303d32012-06-05 22:31:31 +0200535int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
Marek Lindner1c280472011-11-28 17:40:17 +0800536{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200537 struct batadv_algo_ops *bat_algo_ops_tmp;
Marek Lindner1c280472011-11-28 17:40:17 +0800538
Sven Eckelmannee11ad62012-05-16 20:23:19 +0200539 bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
Marek Lindner1c280472011-11-28 17:40:17 +0800540 if (bat_algo_ops_tmp) {
Sven Eckelmann86ceb362012-03-07 09:07:45 +0100541 pr_info("Trying to register already registered routing algorithm: %s\n",
542 bat_algo_ops->name);
Markus Pargmann9fb6c652014-12-26 12:41:40 +0100543 return -EEXIST;
Marek Lindner1c280472011-11-28 17:40:17 +0800544 }
545
Marek Lindner01c42242011-11-28 21:31:55 +0800546 /* all algorithms must implement all ops (for now) */
Marek Lindnerc2aca022012-02-07 17:20:45 +0800547 if (!bat_algo_ops->bat_iface_enable ||
Marek Lindner00a50072012-02-07 17:20:47 +0800548 !bat_algo_ops->bat_iface_disable ||
Marek Lindnerc3229392012-03-11 06:17:50 +0800549 !bat_algo_ops->bat_iface_update_mac ||
Marek Lindnercd8b78e2012-02-07 17:20:49 +0800550 !bat_algo_ops->bat_primary_iface_set ||
Marek Lindner01c42242011-11-28 21:31:55 +0800551 !bat_algo_ops->bat_ogm_schedule ||
Antonio Quartullia3285a82013-09-02 12:15:04 +0200552 !bat_algo_ops->bat_ogm_emit ||
Antonio Quartullic43c9812013-09-02 12:15:05 +0200553 !bat_algo_ops->bat_neigh_cmp ||
554 !bat_algo_ops->bat_neigh_is_equiv_or_better) {
Marek Lindner01c42242011-11-28 21:31:55 +0800555 pr_info("Routing algo '%s' does not implement required ops\n",
556 bat_algo_ops->name);
Markus Pargmann9fb6c652014-12-26 12:41:40 +0100557 return -EINVAL;
Marek Lindner01c42242011-11-28 21:31:55 +0800558 }
559
Marek Lindner1c280472011-11-28 17:40:17 +0800560 INIT_HLIST_NODE(&bat_algo_ops->list);
Sven Eckelmannee11ad62012-05-16 20:23:19 +0200561 hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
Marek Lindner1c280472011-11-28 17:40:17 +0800562
Markus Pargmann9fb6c652014-12-26 12:41:40 +0100563 return 0;
Marek Lindner1c280472011-11-28 17:40:17 +0800564}
565
Sven Eckelmann56303d32012-06-05 22:31:31 +0200566int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
Marek Lindner1c280472011-11-28 17:40:17 +0800567{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200568 struct batadv_algo_ops *bat_algo_ops;
Marek Lindner1c280472011-11-28 17:40:17 +0800569
Sven Eckelmannee11ad62012-05-16 20:23:19 +0200570 bat_algo_ops = batadv_algo_get(name);
Marek Lindner1c280472011-11-28 17:40:17 +0800571 if (!bat_algo_ops)
Markus Pargmannf372d092014-12-26 12:41:41 +0100572 return -EINVAL;
Marek Lindner1c280472011-11-28 17:40:17 +0800573
574 bat_priv->bat_algo_ops = bat_algo_ops;
Marek Lindner1c280472011-11-28 17:40:17 +0800575
Markus Pargmannf372d092014-12-26 12:41:41 +0100576 return 0;
Marek Lindner1c280472011-11-28 17:40:17 +0800577}
578
Sven Eckelmann3193e8f2012-05-12 02:09:42 +0200579int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
Marek Lindner1c280472011-11-28 17:40:17 +0800580{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200581 struct batadv_algo_ops *bat_algo_ops;
Marek Lindner1c280472011-11-28 17:40:17 +0800582
Antonio Quartulli0c814652013-03-21 09:23:29 +0100583 seq_puts(seq, "Available routing algorithms:\n");
Marek Lindner1c280472011-11-28 17:40:17 +0800584
Sasha Levinb67bfe02013-02-27 17:06:00 -0800585 hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
Marek Lindner1c280472011-11-28 17:40:17 +0800586 seq_printf(seq, "%s\n", bat_algo_ops->name);
587 }
588
589 return 0;
590}
591
/**
 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
 * the header
 * @skb: skb pointing to fragmented socket buffers
 * @payload_ptr: Pointer to position inside the head buffer of the skb
 * marking the start of the data to be CRC'ed
 *
 * payload_ptr must always point to an address in the skb head buffer and not to
 * a fragment.
 *
 * Returns the crc32c checksum of the data from payload_ptr to the end of
 * the skb, converted to network byte order.
 */
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
{
	u32 crc = 0;
	unsigned int from;
	unsigned int to = skb->len;
	struct skb_seq_state st;
	const u8 *data;
	unsigned int len;
	unsigned int consumed = 0;

	from = (unsigned int)(payload_ptr - skb->data);

	/* iterate over all (possibly non-linear) segments of the skb */
	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		crc = crc32c(crc, data, len);
		consumed += len;
	}

	return htonl(crc);
}
622
/**
 * batadv_tvlv_handler_free_ref - decrement the tvlv handler refcounter and
 * possibly free it
 * @tvlv_handler: the tvlv handler to free
 */
static void
batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
{
	/* handlers are looked up under RCU (see batadv_tvlv_handler_get),
	 * hence the deferred kfree_rcu instead of an immediate kfree
	 */
	if (atomic_dec_and_test(&tvlv_handler->refcount))
		kfree_rcu(tvlv_handler, rcu);
}
634
635/**
636 * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
637 * based on the provided type and version (both need to match)
638 * @bat_priv: the bat priv with all the soft interface information
639 * @type: tvlv handler type to look for
640 * @version: tvlv handler version to look for
641 *
642 * Returns tvlv handler if found or NULL otherwise.
643 */
644static struct batadv_tvlv_handler
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200645*batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
Marek Lindneref261572013-04-23 21:39:57 +0800646{
647 struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
648
649 rcu_read_lock();
650 hlist_for_each_entry_rcu(tvlv_handler_tmp,
651 &bat_priv->tvlv.handler_list, list) {
652 if (tvlv_handler_tmp->type != type)
653 continue;
654
655 if (tvlv_handler_tmp->version != version)
656 continue;
657
658 if (!atomic_inc_not_zero(&tvlv_handler_tmp->refcount))
659 continue;
660
661 tvlv_handler = tvlv_handler_tmp;
662 break;
663 }
664 rcu_read_unlock();
665
666 return tvlv_handler;
667}
668
/**
 * batadv_tvlv_container_free_ref - decrement the tvlv container refcounter and
 * possibly free it
 * @tvlv: the tvlv container to free
 */
static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
{
	/* plain kfree (no RCU) - presumably containers are only reached
	 * under tvlv.container_list_lock (see batadv_tvlv_container_get);
	 * NOTE(review): confirm no lockless readers exist
	 */
	if (atomic_dec_and_test(&tvlv->refcount))
		kfree(tvlv);
}
679
680/**
681 * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
682 * list based on the provided type and version (both need to match)
683 * @bat_priv: the bat priv with all the soft interface information
684 * @type: tvlv container type to look for
685 * @version: tvlv container version to look for
686 *
687 * Has to be called with the appropriate locks being acquired
688 * (tvlv.container_list_lock).
689 *
690 * Returns tvlv container if found or NULL otherwise.
691 */
692static struct batadv_tvlv_container
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200693*batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
Marek Lindneref261572013-04-23 21:39:57 +0800694{
695 struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
696
697 hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
698 if (tvlv_tmp->tvlv_hdr.type != type)
699 continue;
700
701 if (tvlv_tmp->tvlv_hdr.version != version)
702 continue;
703
704 if (!atomic_inc_not_zero(&tvlv_tmp->refcount))
705 continue;
706
707 tvlv = tvlv_tmp;
708 break;
709 }
710
711 return tvlv;
712}
713
714/**
715 * batadv_tvlv_container_list_size - calculate the size of the tvlv container
716 * list entries
717 * @bat_priv: the bat priv with all the soft interface information
718 *
719 * Has to be called with the appropriate locks being acquired
720 * (tvlv.container_list_lock).
721 *
722 * Returns size of all currently registered tvlv containers in bytes.
723 */
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200724static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
Marek Lindneref261572013-04-23 21:39:57 +0800725{
726 struct batadv_tvlv_container *tvlv;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200727 u16 tvlv_len = 0;
Marek Lindneref261572013-04-23 21:39:57 +0800728
729 hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
730 tvlv_len += sizeof(struct batadv_tvlv_hdr);
731 tvlv_len += ntohs(tvlv->tvlv_hdr.len);
732 }
733
734 return tvlv_len;
735}
736
/**
 * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
 *  list
 * @tvlv: the to be removed tvlv container (may be NULL, in which case this is
 *  a no-op)
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 */
static void batadv_tvlv_container_remove(struct batadv_tvlv_container *tvlv)
{
	if (!tvlv)
		return;

	hlist_del(&tvlv->list);

	/* two references are dropped here: the one the caller obtained via
	 * batadv_tvlv_container_get() and the initial reference held by the
	 * container list itself; the second put frees the container
	 */
	batadv_tvlv_container_free_ref(tvlv);
	batadv_tvlv_container_free_ref(tvlv);
}
756
/**
 * batadv_tvlv_container_unregister - unregister tvlv container based on the
 *  provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type to unregister
 * @version: tvlv container version to unregister
 */
void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
				      u8 type, u8 version)
{
	struct batadv_tvlv_container *tvlv;

	/* lookup and removal must happen atomically with respect to other
	 * container list users; remove() handles a NULL lookup result
	 */
	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv = batadv_tvlv_container_get(bat_priv, type, version);
	batadv_tvlv_container_remove(tvlv);
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}
774
775/**
776 * batadv_tvlv_container_register - register tvlv type, version and content
777 * to be propagated with each (primary interface) OGM
778 * @bat_priv: the bat priv with all the soft interface information
779 * @type: tvlv container type
780 * @version: tvlv container version
781 * @tvlv_value: tvlv container content
782 * @tvlv_value_len: tvlv container content length
783 *
784 * If a container of the same type and version was already registered the new
785 * content is going to replace the old one.
786 */
787void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200788 u8 type, u8 version,
789 void *tvlv_value, u16 tvlv_value_len)
Marek Lindneref261572013-04-23 21:39:57 +0800790{
791 struct batadv_tvlv_container *tvlv_old, *tvlv_new;
792
793 if (!tvlv_value)
794 tvlv_value_len = 0;
795
796 tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
797 if (!tvlv_new)
798 return;
799
800 tvlv_new->tvlv_hdr.version = version;
801 tvlv_new->tvlv_hdr.type = type;
802 tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);
803
804 memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
805 INIT_HLIST_NODE(&tvlv_new->list);
806 atomic_set(&tvlv_new->refcount, 1);
807
808 spin_lock_bh(&bat_priv->tvlv.container_list_lock);
809 tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
810 batadv_tvlv_container_remove(tvlv_old);
811 hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
812 spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
813}
814
815/**
Antonio Quartulli3f687852014-11-02 11:29:56 +0100816 * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
Marek Lindneref261572013-04-23 21:39:57 +0800817 * requested packet size
818 * @packet_buff: packet buffer
819 * @packet_buff_len: packet buffer size
Martin Hundebølla0e28772014-07-15 09:41:08 +0200820 * @min_packet_len: requested packet minimum size
Marek Lindneref261572013-04-23 21:39:57 +0800821 * @additional_packet_len: requested additional packet size on top of minimum
822 * size
823 *
824 * Returns true of the packet buffer could be changed to the requested size,
825 * false otherwise.
826 */
827static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
828 int *packet_buff_len,
829 int min_packet_len,
830 int additional_packet_len)
831{
832 unsigned char *new_buff;
833
834 new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);
835
836 /* keep old buffer if kmalloc should fail */
Markus Pargmann16b9ce82014-12-26 12:41:23 +0100837 if (!new_buff)
838 return false;
Marek Lindneref261572013-04-23 21:39:57 +0800839
Markus Pargmann16b9ce82014-12-26 12:41:23 +0100840 memcpy(new_buff, *packet_buff, min_packet_len);
841 kfree(*packet_buff);
842 *packet_buff = new_buff;
843 *packet_buff_len = min_packet_len + additional_packet_len;
844
845 return true;
Marek Lindneref261572013-04-23 21:39:57 +0800846}
847
/**
 * batadv_tvlv_container_ogm_append - append tvlv container content to given
 *  OGM packet buffer
 * @bat_priv: the bat priv with all the soft interface information
 * @packet_buff: ogm packet buffer
 * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
 *  content
 * @packet_min_len: ogm header size to be preserved for the OGM itself
 *
 * The ogm packet might be enlarged or shrunk depending on the current size
 * and the size of the to-be-appended tvlv containers.
 *
 * Returns size of all appended tvlv containers in bytes.
 */
u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
				     unsigned char **packet_buff,
				     int *packet_buff_len, int packet_min_len)
{
	struct batadv_tvlv_container *tvlv;
	struct batadv_tvlv_hdr *tvlv_hdr;
	u16 tvlv_value_len;
	void *tvlv_value;
	bool ret;

	/* the lock protects both the container list and the size computed
	 * from it, which must stay valid while the buffer is filled
	 */
	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);

	/* resize the OGM buffer to hold the header plus all containers */
	ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
					      packet_min_len, tvlv_value_len);

	if (!ret)
		goto end;

	if (!tvlv_value_len)
		goto end;

	/* serialization area starts right after the preserved OGM header */
	tvlv_value = (*packet_buff) + packet_min_len;

	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
		/* copy the header fields (len is already in network byte
		 * order) followed by the inline payload stored after the
		 * container struct
		 */
		tvlv_hdr = tvlv_value;
		tvlv_hdr->type = tvlv->tvlv_hdr.type;
		tvlv_hdr->version = tvlv->tvlv_hdr.version;
		tvlv_hdr->len = tvlv->tvlv_hdr.len;
		tvlv_value = tvlv_hdr + 1;
		memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
		tvlv_value = (u8 *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
	}

end:
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
	return tvlv_value_len;
}
900
901/**
902 * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
903 * appropriate handlers
904 * @bat_priv: the bat priv with all the soft interface information
905 * @tvlv_handler: tvlv callback function handling the tvlv content
906 * @ogm_source: flag indicating wether the tvlv is an ogm or a unicast packet
907 * @orig_node: orig node emitting the ogm packet
908 * @src: source mac address of the unicast packet
909 * @dst: destination mac address of the unicast packet
910 * @tvlv_value: tvlv content
911 * @tvlv_value_len: tvlv content length
912 *
913 * Returns success if handler was not found or the return value of the handler
914 * callback.
915 */
916static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
917 struct batadv_tvlv_handler *tvlv_handler,
918 bool ogm_source,
919 struct batadv_orig_node *orig_node,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200920 u8 *src, u8 *dst,
921 void *tvlv_value, u16 tvlv_value_len)
Marek Lindneref261572013-04-23 21:39:57 +0800922{
923 if (!tvlv_handler)
924 return NET_RX_SUCCESS;
925
926 if (ogm_source) {
927 if (!tvlv_handler->ogm_handler)
928 return NET_RX_SUCCESS;
929
930 if (!orig_node)
931 return NET_RX_SUCCESS;
932
933 tvlv_handler->ogm_handler(bat_priv, orig_node,
934 BATADV_NO_FLAGS,
935 tvlv_value, tvlv_value_len);
936 tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
937 } else {
938 if (!src)
939 return NET_RX_SUCCESS;
940
941 if (!dst)
942 return NET_RX_SUCCESS;
943
944 if (!tvlv_handler->unicast_handler)
945 return NET_RX_SUCCESS;
946
947 return tvlv_handler->unicast_handler(bat_priv, src,
948 dst, tvlv_value,
949 tvlv_value_len);
950 }
951
952 return NET_RX_SUCCESS;
953}
954
/**
 * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
 *  appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Returns success when processing an OGM or the return value of all called
 * handler callbacks.
 */
int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
				   bool ogm_source,
				   struct batadv_orig_node *orig_node,
				   u8 *src, u8 *dst,
				   void *tvlv_value, u16 tvlv_value_len)
{
	struct batadv_tvlv_handler *tvlv_handler;
	struct batadv_tvlv_hdr *tvlv_hdr;
	u16 tvlv_value_cont_len;
	u8 cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
	int ret = NET_RX_SUCCESS;

	/* walk the buffer one (header, payload) pair at a time; the loop
	 * condition guarantees a full header is available before it is read
	 */
	while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
		tvlv_hdr = tvlv_value;
		tvlv_value_cont_len = ntohs(tvlv_hdr->len);
		tvlv_value = tvlv_hdr + 1;
		tvlv_value_len -= sizeof(*tvlv_hdr);

		/* advertised payload length exceeds the remaining buffer:
		 * stop parsing rather than read past the end
		 */
		if (tvlv_value_cont_len > tvlv_value_len)
			break;

		/* may be NULL; batadv_tvlv_call_handler() copes with that */
		tvlv_handler = batadv_tvlv_handler_get(bat_priv,
						       tvlv_hdr->type,
						       tvlv_hdr->version);

		/* OR the verdicts together so one failing handler taints
		 * the overall result
		 */
		ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
						ogm_source, orig_node,
						src, dst, tvlv_value,
						tvlv_value_cont_len);
		if (tvlv_handler)
			batadv_tvlv_handler_free_ref(tvlv_handler);
		tvlv_value = (u8 *)tvlv_value + tvlv_value_cont_len;
		tvlv_value_len -= tvlv_value_cont_len;
	}

	if (!ogm_source)
		return ret;

	/* OGM pass: notify CIFNOTFND handlers whose container did not appear
	 * in this OGM, then clear the per-OGM "called" marker for all
	 */
	rcu_read_lock();
	hlist_for_each_entry_rcu(tvlv_handler,
				 &bat_priv->tvlv.handler_list, list) {
		if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
		    !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
			tvlv_handler->ogm_handler(bat_priv, orig_node,
						  cifnotfound, NULL, 0);

		tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
	}
	rcu_read_unlock();

	return NET_RX_SUCCESS;
}
1021
1022/**
1023 * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
1024 * handlers
1025 * @bat_priv: the bat priv with all the soft interface information
1026 * @batadv_ogm_packet: ogm packet containing the tvlv containers
1027 * @orig_node: orig node emitting the ogm packet
1028 */
1029void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
1030 struct batadv_ogm_packet *batadv_ogm_packet,
1031 struct batadv_orig_node *orig_node)
1032{
1033 void *tvlv_value;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001034 u16 tvlv_value_len;
Marek Lindneref261572013-04-23 21:39:57 +08001035
1036 if (!batadv_ogm_packet)
1037 return;
1038
1039 tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
1040 if (!tvlv_value_len)
1041 return;
1042
1043 tvlv_value = batadv_ogm_packet + 1;
1044
1045 batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
1046 tvlv_value, tvlv_value_len);
1047}
1048
1049/**
1050 * batadv_tvlv_handler_register - register tvlv handler based on the provided
1051 * type and version (both need to match) for ogm tvlv payload and/or unicast
1052 * payload
1053 * @bat_priv: the bat priv with all the soft interface information
1054 * @optr: ogm tvlv handler callback function. This function receives the orig
1055 * node, flags and the tvlv content as argument to process.
1056 * @uptr: unicast tvlv handler callback function. This function receives the
1057 * source & destination of the unicast packet as well as the tvlv content
1058 * to process.
1059 * @type: tvlv handler type to be registered
1060 * @version: tvlv handler version to be registered
1061 * @flags: flags to enable or disable TVLV API behavior
1062 */
1063void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
1064 void (*optr)(struct batadv_priv *bat_priv,
1065 struct batadv_orig_node *orig,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001066 u8 flags,
Marek Lindneref261572013-04-23 21:39:57 +08001067 void *tvlv_value,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001068 u16 tvlv_value_len),
Marek Lindneref261572013-04-23 21:39:57 +08001069 int (*uptr)(struct batadv_priv *bat_priv,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001070 u8 *src, u8 *dst,
Marek Lindneref261572013-04-23 21:39:57 +08001071 void *tvlv_value,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001072 u16 tvlv_value_len),
1073 u8 type, u8 version, u8 flags)
Marek Lindneref261572013-04-23 21:39:57 +08001074{
1075 struct batadv_tvlv_handler *tvlv_handler;
1076
1077 tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
1078 if (tvlv_handler) {
1079 batadv_tvlv_handler_free_ref(tvlv_handler);
1080 return;
1081 }
1082
1083 tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
1084 if (!tvlv_handler)
1085 return;
1086
1087 tvlv_handler->ogm_handler = optr;
1088 tvlv_handler->unicast_handler = uptr;
1089 tvlv_handler->type = type;
1090 tvlv_handler->version = version;
1091 tvlv_handler->flags = flags;
1092 atomic_set(&tvlv_handler->refcount, 1);
1093 INIT_HLIST_NODE(&tvlv_handler->list);
1094
1095 spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
1096 hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
1097 spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
1098}
1099
/**
 * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
 *  provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to be unregistered
 * @version: tvlv handler version to be unregistered
 */
void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
				    u8 type, u8 version)
{
	struct batadv_tvlv_handler *tvlv_handler;

	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
	if (!tvlv_handler)
		return;

	/* drop the reference taken by the lookup above */
	batadv_tvlv_handler_free_ref(tvlv_handler);
	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
	hlist_del_rcu(&tvlv_handler->list);
	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
	/* drop the list's own reference; the handler is freed via RCU once
	 * concurrent readers are done with it
	 */
	batadv_tvlv_handler_free_ref(tvlv_handler);
}
1122
/**
 * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
 *  specified host
 * @bat_priv: the bat priv with all the soft interface information
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @type: tvlv type
 * @version: tvlv version
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 */
void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
			      u8 *dst, u8 type, u8 version,
			      void *tvlv_value, u16 tvlv_value_len)
{
	struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
	struct batadv_tvlv_hdr *tvlv_hdr;
	struct batadv_orig_node *orig_node;
	struct sk_buff *skb = NULL;
	unsigned char *tvlv_buff;
	unsigned int tvlv_len;
	ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
	bool ret = false;

	/* the destination must be a known originator in the mesh */
	orig_node = batadv_orig_hash_find(bat_priv, dst);
	if (!orig_node)
		goto out;

	/* one tvlv header plus the caller-supplied payload */
	tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;

	skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
	if (!skb)
		goto out;

	skb->priority = TC_PRIO_CONTROL;
	/* leave headroom for the ethernet header added later on */
	skb_reserve(skb, ETH_HLEN);
	tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
	unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
	unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
	unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
	unicast_tvlv_packet->ttl = BATADV_TTL;
	unicast_tvlv_packet->reserved = 0;
	unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
	unicast_tvlv_packet->align = 0;
	ether_addr_copy(unicast_tvlv_packet->src, src);
	ether_addr_copy(unicast_tvlv_packet->dst, dst);

	/* serialize the tvlv header and payload right after the packet
	 * header
	 */
	tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
	tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
	tvlv_hdr->version = version;
	tvlv_hdr->type = type;
	tvlv_hdr->len = htons(tvlv_value_len);
	tvlv_buff += sizeof(*tvlv_hdr);
	memcpy(tvlv_buff, tvlv_value, tvlv_value_len);

	/* on anything but NET_XMIT_DROP the skb is consumed by the
	 * transmit path and must not be freed here
	 */
	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = true;

out:
	if (skb && !ret)
		kfree_skb(skb);
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
}
1187
/**
 * batadv_get_vid - extract the VLAN identifier from skb if any
 * @skb: the buffer containing the packet
 * @header_len: length of the batman header preceding the ethernet header
 *
 * If the packet embedded in the skb is vlan tagged this function returns the
 * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned.
 */
unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
{
	struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
	struct vlan_ethhdr *vhdr;
	unsigned short vid;

	if (ethhdr->h_proto != htons(ETH_P_8021Q))
		return BATADV_NO_FLAGS;

	/* make sure the full VLAN ethernet header is in the linear data
	 * area before dereferencing it
	 */
	if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
		return BATADV_NO_FLAGS;

	/* recompute the pointer: pskb_may_pull() may have reallocated and
	 * moved skb->data, invalidating ethhdr
	 */
	vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
	vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
	vid |= BATADV_VLAN_HAS_TAG;

	return vid;
}
1214
Antonio Quartullieceb22a2013-11-16 12:03:51 +01001215/**
1216 * batadv_vlan_ap_isola_get - return the AP isolation status for the given vlan
1217 * @bat_priv: the bat priv with all the soft interface information
1218 * @vid: the VLAN identifier for which the AP isolation attributed as to be
1219 * looked up
1220 *
1221 * Returns true if AP isolation is on for the VLAN idenfied by vid, false
1222 * otherwise
1223 */
1224bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
1225{
1226 bool ap_isolation_enabled = false;
1227 struct batadv_softif_vlan *vlan;
1228
1229 /* if the AP isolation is requested on a VLAN, then check for its
1230 * setting in the proper VLAN private data structure
1231 */
1232 vlan = batadv_softif_vlan_get(bat_priv, vid);
1233 if (vlan) {
1234 ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
1235 batadv_softif_vlan_free_ref(vlan);
1236 }
1237
1238 return ap_isolation_enabled;
1239}
1240
/* batadv_param_set_ra - validate and store the "routing_algo" module
 * parameter; rejects names that were never registered as a routing
 * algorithm
 */
static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
	struct batadv_algo_ops *bat_algo_ops;
	/* const is cast away so a trailing newline (e.g. from
	 * "echo bar > /sys/...") can be stripped in place before the lookup
	 */
	char *algo_name = (char *)val;
	size_t name_len = strlen(algo_name);

	if (name_len > 0 && algo_name[name_len - 1] == '\n')
		algo_name[name_len - 1] = '\0';

	bat_algo_ops = batadv_algo_get(algo_name);
	if (!bat_algo_ops) {
		pr_err("Routing algorithm '%s' is not supported\n", algo_name);
		return -EINVAL;
	}

	/* copy the validated name into the parameter's backing string */
	return param_set_copystring(algo_name, kp);
}
1258
/* parameter ops for "routing_algo": the setter validates the algorithm name
 * before storing it, the getter returns the stored string unchanged
 */
static const struct kernel_param_ops batadv_param_ops_ra = {
	.set = batadv_param_set_ra,
	.get = param_get_string,
};

/* backing storage for the "routing_algo" parameter string */
static struct kparam_string batadv_param_string_ra = {
	.maxlen = sizeof(batadv_routing_algo),
	.string = batadv_routing_algo,
};
1268
/* expose the routing algorithm selection as a writable (0644) module
 * parameter named "routing_algo"
 */
module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
		0644);
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);