/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/crc32c.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/dsfield.h>
#include <net/rtnetlink.h>

#include "bat_algo.h"
#include "bridge_loop_avoidance.h"
#include "debugfs.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "multicast.h"
#include "network-coding.h"
#include "originator.h"
#include "packet.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"
#include "translation-table.h"

/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;
static int (*batadv_rx_handler[256])(struct sk_buff *,
                                     struct batadv_hard_iface *);

unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);

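/**
 * batadv_init - set up the batman-adv module at load time
 *
 * Initializes the routing algorithms, the default receive handlers, debugfs
 * entries, the netdevice notifier and the rtnl link operations.
 *
 * Return: 0 on success or -ENOMEM if the event workqueue cannot be created.
 */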
static int __init batadv_init(void)
{
        INIT_LIST_HEAD(&batadv_hardif_list);
        batadv_algo_init();

        batadv_recv_handler_init();

        batadv_v_init();
        batadv_iv_init();
        batadv_nc_init();

        batadv_event_workqueue = create_singlethread_workqueue("bat_events");

        if (!batadv_event_workqueue)
                return -ENOMEM;

        batadv_socket_init();
        batadv_debugfs_init();

        register_netdevice_notifier(&batadv_hard_if_notifier);
        rtnl_link_register(&batadv_link_ops);

        pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
                BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

        return 0;
}

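/**
 * batadv_exit - undo at module unload time everything set up in batadv_init()
 */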
static void __exit batadv_exit(void)
{
        batadv_debugfs_destroy();
        rtnl_link_unregister(&batadv_link_ops);
        unregister_netdevice_notifier(&batadv_hard_if_notifier);
        batadv_hardif_remove_interfaces();

        flush_workqueue(batadv_event_workqueue);
        destroy_workqueue(batadv_event_workqueue);
        batadv_event_workqueue = NULL;

        rcu_barrier();
}

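/**
 * batadv_mesh_init - initialize the mesh-related state of a soft interface
 * @soft_iface: netdev struct of the soft interface
 *
 * Return: 0 on success or a negative error code otherwise; on failure all
 * partially initialized structures are released again via batadv_mesh_free().
 */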
int batadv_mesh_init(struct net_device *soft_iface)
{
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        int ret;

        spin_lock_init(&bat_priv->forw_bat_list_lock);
        spin_lock_init(&bat_priv->forw_bcast_list_lock);
        spin_lock_init(&bat_priv->tt.changes_list_lock);
        spin_lock_init(&bat_priv->tt.req_list_lock);
        spin_lock_init(&bat_priv->tt.roam_list_lock);
        spin_lock_init(&bat_priv->tt.last_changeset_lock);
        spin_lock_init(&bat_priv->tt.commit_lock);
        spin_lock_init(&bat_priv->gw.list_lock);
#ifdef CONFIG_BATMAN_ADV_MCAST
        spin_lock_init(&bat_priv->mcast.want_lists_lock);
#endif
        spin_lock_init(&bat_priv->tvlv.container_list_lock);
        spin_lock_init(&bat_priv->tvlv.handler_list_lock);
        spin_lock_init(&bat_priv->softif_vlan_list_lock);

        INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
        INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
        INIT_HLIST_HEAD(&bat_priv->gw.list);
#ifdef CONFIG_BATMAN_ADV_MCAST
        INIT_HLIST_HEAD(&bat_priv->mcast.want_all_unsnoopables_list);
        INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv4_list);
        INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
#endif
        INIT_LIST_HEAD(&bat_priv->tt.changes_list);
        INIT_HLIST_HEAD(&bat_priv->tt.req_list);
        INIT_LIST_HEAD(&bat_priv->tt.roam_list);
#ifdef CONFIG_BATMAN_ADV_MCAST
        INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
#endif
        INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
        INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
        INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);

        ret = batadv_v_mesh_init(bat_priv);
        if (ret < 0)
                goto err;

        ret = batadv_originator_init(bat_priv);
        if (ret < 0)
                goto err;

        ret = batadv_tt_init(bat_priv);
        if (ret < 0)
                goto err;

        ret = batadv_bla_init(bat_priv);
        if (ret < 0)
                goto err;

        ret = batadv_dat_init(bat_priv);
        if (ret < 0)
                goto err;

        ret = batadv_nc_mesh_init(bat_priv);
        if (ret < 0)
                goto err;

        batadv_gw_init(bat_priv);
        batadv_mcast_init(bat_priv);

        atomic_set(&bat_priv->gw.reselect, 0);
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

        return 0;

err:
        batadv_mesh_free(soft_iface);
        return ret;
}

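/**
 * batadv_mesh_free - release the mesh-related state of a soft interface
 * @soft_iface: netdev struct of the soft interface
 */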
void batadv_mesh_free(struct net_device *soft_iface)
{
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);

        atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

        batadv_purge_outstanding_packets(bat_priv, NULL);

        batadv_gw_node_free(bat_priv);

        batadv_v_mesh_free(bat_priv);
        batadv_nc_mesh_free(bat_priv);
        batadv_dat_free(bat_priv);
        batadv_bla_free(bat_priv);

        batadv_mcast_free(bat_priv);

        /* Free the TT and the originator tables only after having terminated
         * all the other dependent components which may use these structures
         * for their purposes.
         */
        batadv_tt_free(bat_priv);

        /* Since the originator table clean up routine is accessing the TT
         * tables as well, it has to be invoked after the TT tables have been
         * freed and marked as empty. This ensures that no cleanup RCU
         * callbacks accessing the TT data are scheduled for later execution.
         */
        batadv_originator_free(bat_priv);

        batadv_gw_free(bat_priv);

        free_percpu(bat_priv->bat_counters);
        bat_priv->bat_counters = NULL;

        atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}

/**
 * batadv_is_my_mac - check if the given mac address belongs to any of the
 *  real interfaces in the current mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address to check
 *
 * Return: true if the mac address was found, false otherwise.
 */
bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
{
        const struct batadv_hard_iface *hard_iface;
        bool is_my_mac = false;

        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->if_status != BATADV_IF_ACTIVE)
                        continue;

                if (hard_iface->soft_iface != bat_priv->soft_iface)
                        continue;

                if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
                        is_my_mac = true;
                        break;
                }
        }
        rcu_read_unlock();
        return is_my_mac;
}

/**
 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
 *  function that requires the primary interface
 * @seq: debugfs table seq_file struct
 *
 * Return: primary interface if found or NULL otherwise.
 */
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
        struct batadv_hard_iface *primary_if;

        primary_if = batadv_primary_if_get_selected(bat_priv);

        if (!primary_if) {
                seq_printf(seq,
                           "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
                           net_dev->name);
                goto out;
        }

        if (primary_if->if_status == BATADV_IF_ACTIVE)
                goto out;

        seq_printf(seq,
                   "BATMAN mesh %s disabled - primary interface not active\n",
                   net_dev->name);
        batadv_hardif_put(primary_if);
        primary_if = NULL;

out:
        return primary_if;
}

/**
 * batadv_max_header_len - calculate maximum encapsulation overhead for a
 *  payload packet
 *
 * Return: the maximum encapsulation overhead in bytes.
 */
int batadv_max_header_len(void)
{
        int header_len = 0;

        header_len = max_t(int, header_len,
                           sizeof(struct batadv_unicast_packet));
        header_len = max_t(int, header_len,
                           sizeof(struct batadv_unicast_4addr_packet));
        header_len = max_t(int, header_len,
                           sizeof(struct batadv_bcast_packet));

#ifdef CONFIG_BATMAN_ADV_NC
        header_len = max_t(int, header_len,
                           sizeof(struct batadv_coded_packet));
#endif

        return header_len + ETH_HLEN;
}

/**
 * batadv_skb_set_priority - sets skb priority according to packet content
 * @skb: the packet to be sent
 * @offset: offset to the packet content
 *
 * This function sets a value between 256 and 263 (802.1d priority), which
 * can be interpreted by cfg80211 or other drivers.
 */
void batadv_skb_set_priority(struct sk_buff *skb, int offset)
{
        struct iphdr ip_hdr_tmp, *ip_hdr;
        struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
        struct ethhdr ethhdr_tmp, *ethhdr;
        struct vlan_ethhdr *vhdr, vhdr_tmp;
        u32 prio;

        /* already set, do nothing */
        if (skb->priority >= 256 && skb->priority <= 263)
                return;

        ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
        if (!ethhdr)
                return;

        switch (ethhdr->h_proto) {
        case htons(ETH_P_8021Q):
                vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
                                          sizeof(*vhdr), &vhdr_tmp);
                if (!vhdr)
                        return;
                prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
                prio = prio >> VLAN_PRIO_SHIFT;
                break;
        case htons(ETH_P_IP):
                ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
                                            sizeof(*ip_hdr), &ip_hdr_tmp);
                if (!ip_hdr)
                        return;
                prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
                break;
        case htons(ETH_P_IPV6):
                ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
                                             sizeof(*ip6_hdr), &ip6_hdr_tmp);
                if (!ip6_hdr)
                        return;
                prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
                break;
        default:
                return;
        }

        skb->priority = prio + 256;
}

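/**
 * batadv_recv_unhandled_packet - default handler for unknown packet types
 * @skb: the received packet
 * @recv_if: interface that the packet was received on
 *
 * Return: NET_RX_DROP, as no real handler is registered for this packet type.
 */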
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
                                        struct batadv_hard_iface *recv_if)
{
        return NET_RX_DROP;
}

/**
 * batadv_batman_skb_recv - handle incoming packets with the batman ethertype
 *  received on any active hard interface
 * @skb: the received packet
 * @dev: the net device that the packet was received on
 * @ptype: packet type of the incoming packet (ETH_P_BATMAN)
 * @orig_dev: the original receive net device (e.g. bonded device)
 *
 * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *ptype,
                           struct net_device *orig_dev)
{
        struct batadv_priv *bat_priv;
        struct batadv_ogm_packet *batadv_ogm_packet;
        struct batadv_hard_iface *hard_iface;
        u8 idx;
        int ret;

        hard_iface = container_of(ptype, struct batadv_hard_iface,
                                  batman_adv_ptype);

        /* Prevent processing a packet received on an interface which is
         * getting shut down, otherwise the packet may trigger de-reference
         * errors further down in the receive path.
         */
        if (!kref_get_unless_zero(&hard_iface->refcount))
                goto err_out;

        skb = skb_share_check(skb, GFP_ATOMIC);

        /* skb was released by skb_share_check() */
        if (!skb)
                goto err_put;

        /* packet should hold at least type and version */
        if (unlikely(!pskb_may_pull(skb, 2)))
                goto err_free;

        /* expect a valid ethernet header here. */
        if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
                goto err_free;

        if (!hard_iface->soft_iface)
                goto err_free;

        bat_priv = netdev_priv(hard_iface->soft_iface);

        if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
                goto err_free;

        /* discard frames on not active interfaces */
        if (hard_iface->if_status != BATADV_IF_ACTIVE)
                goto err_free;

        batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

        if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Drop packet: incompatible batman version (%i)\n",
                           batadv_ogm_packet->version);
                goto err_free;
        }

        /* reset control block to avoid left overs from previous users */
        memset(skb->cb, 0, sizeof(struct batadv_skb_cb));

        /* all receive handlers return whether they received or reused
         * the supplied skb. if not, we have to free the skb.
         */
        idx = batadv_ogm_packet->packet_type;
        ret = (*batadv_rx_handler[idx])(skb, hard_iface);

        if (ret == NET_RX_DROP)
                kfree_skb(skb);

        batadv_hardif_put(hard_iface);

        /* return NET_RX_SUCCESS in any case as we
         * most probably dropped the packet for
         * routing-logical reasons.
         */
        return NET_RX_SUCCESS;

err_free:
        kfree_skb(skb);
err_put:
        batadv_hardif_put(hard_iface);
err_out:
        return NET_RX_DROP;
}

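/**
 * batadv_recv_handler_init - populate the packet type dispatch table with the
 *  default handlers and sanity check the on-wire packet format sizes
 */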
static void batadv_recv_handler_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
                batadv_rx_handler[i] = batadv_recv_unhandled_packet;

        for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
                batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;

        /* compile time checks for sizes */
        BUILD_BUG_ON(sizeof(struct batadv_bla_claim_dst) != 6);
        BUILD_BUG_ON(sizeof(struct batadv_ogm_packet) != 24);
        BUILD_BUG_ON(sizeof(struct batadv_icmp_header) != 20);
        BUILD_BUG_ON(sizeof(struct batadv_icmp_packet) != 20);
        BUILD_BUG_ON(sizeof(struct batadv_icmp_packet_rr) != 116);
        BUILD_BUG_ON(sizeof(struct batadv_unicast_packet) != 10);
        BUILD_BUG_ON(sizeof(struct batadv_unicast_4addr_packet) != 18);
        BUILD_BUG_ON(sizeof(struct batadv_frag_packet) != 20);
        BUILD_BUG_ON(sizeof(struct batadv_bcast_packet) != 14);
        BUILD_BUG_ON(sizeof(struct batadv_coded_packet) != 46);
        BUILD_BUG_ON(sizeof(struct batadv_unicast_tvlv_packet) != 20);
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_hdr) != 4);
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_gateway_data) != 8);
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_vlan_data) != 8);
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);

        /* broadcast packet */
        batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;

        /* unicast packets ... */
        /* unicast with 4 addresses packet */
        batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
        /* unicast packet */
        batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
        /* unicast tvlv packet */
        batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
        /* batman icmp packet */
        batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
        /* Fragmented packets */
        batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
}

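/**
 * batadv_recv_handler_register - register a receive handler for a batman-adv
 *  packet type
 * @packet_type: batadv_packettype which should be handled by recv_handler
 * @recv_handler: receive handler for the packet type
 *
 * Routing algorithm modules claim their packet type this way at init time,
 * roughly as in the following sketch (handler name only illustrative):
 *
 *      ret = batadv_recv_handler_register(BATADV_IV_OGM,
 *                                         batadv_iv_ogm_receive);
 *
 * Return: 0 on success, or -EBUSY if a handler other than the default
 * unhandled-packet callbacks is already registered for packet_type.
 */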
int
batadv_recv_handler_register(u8 packet_type,
                             int (*recv_handler)(struct sk_buff *,
                                                 struct batadv_hard_iface *))
{
        int (*curr)(struct sk_buff *,
                    struct batadv_hard_iface *);
        curr = batadv_rx_handler[packet_type];

        if ((curr != batadv_recv_unhandled_packet) &&
            (curr != batadv_recv_unhandled_unicast_packet))
                return -EBUSY;

        batadv_rx_handler[packet_type] = recv_handler;
        return 0;
}

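/**
 * batadv_recv_handler_unregister - reset the receive handler for a packet
 *  type back to the default drop handler
 * @packet_type: batadv_packettype which should no longer be handled
 */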
void batadv_recv_handler_unregister(u8 packet_type)
{
        batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}

/**
 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
 *  the header
 * @skb: skb pointing to fragmented socket buffers
 * @payload_ptr: Pointer to position inside the head buffer of the skb
 *  marking the start of the data to be CRC'ed
 *
 * payload_ptr must always point to an address in the skb head buffer and not
 * to a fragment.
 *
 * Return: big endian crc32c of the checksummed data
 */
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
{
        u32 crc = 0;
        unsigned int from;
        unsigned int to = skb->len;
        struct skb_seq_state st;
        const u8 *data;
        unsigned int len;
        unsigned int consumed = 0;

        from = (unsigned int)(payload_ptr - skb->data);

        skb_prepare_seq_read(skb, from, to, &st);
        while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                crc = crc32c(crc, data, len);
                consumed += len;
        }

        return htonl(crc);
}

/**
 * batadv_get_vid - extract the VLAN identifier from skb if any
 * @skb: the buffer containing the packet
 * @header_len: length of the batman header preceding the ethernet header
 *
 * Return: VID with the BATADV_VLAN_HAS_TAG flag when the packet embedded in
 * the skb is vlan tagged. Otherwise BATADV_NO_FLAGS.
 */
unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
{
        struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
        struct vlan_ethhdr *vhdr;
        unsigned short vid;

        if (ethhdr->h_proto != htons(ETH_P_8021Q))
                return BATADV_NO_FLAGS;

        if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
                return BATADV_NO_FLAGS;

        vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
        vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
        vid |= BATADV_VLAN_HAS_TAG;

        return vid;
}

/**
 * batadv_vlan_ap_isola_get - return the AP isolation status for the given vlan
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the VLAN identifier for which the AP isolation attribute has to be
 *  looked up
 *
 * Return: true if AP isolation is on for the VLAN identified by vid, false
 * otherwise
 */
bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
{
        bool ap_isolation_enabled = false;
        struct batadv_softif_vlan *vlan;

        /* if the AP isolation is requested on a VLAN, then check for its
         * setting in the proper VLAN private data structure
         */
        vlan = batadv_softif_vlan_get(bat_priv, vid);
        if (vlan) {
                ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
                batadv_softif_vlan_put(vlan);
        }

        return ap_isolation_enabled;
}

module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);