blob: d64ddb961979ae083ba99907b07fc090a3dfe775 [file] [log] [blame]
Sven Eckelmann0046b042016-01-01 00:01:03 +01001/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00002 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
Antonio Quartulliebf38fb2013-11-03 20:40:48 +010015 * along with this program; if not, see <http://www.gnu.org/licenses/>.
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000016 */
17
18#include "main.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020019
20#include <linux/atomic.h>
21#include <linux/bug.h>
22#include <linux/byteorder/generic.h>
23#include <linux/crc32c.h>
24#include <linux/errno.h>
25#include <linux/fs.h>
26#include <linux/if_ether.h>
27#include <linux/if_vlan.h>
28#include <linux/init.h>
29#include <linux/ip.h>
30#include <linux/ipv6.h>
31#include <linux/kernel.h>
Sven Eckelmannf7157dd2016-01-16 10:29:48 +010032#include <linux/kref.h>
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020033#include <linux/list.h>
Sven Eckelmann2c72d652015-06-21 14:45:14 +020034#include <linux/lockdep.h>
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020035#include <linux/module.h>
36#include <linux/moduleparam.h>
37#include <linux/netdevice.h>
38#include <linux/pkt_sched.h>
39#include <linux/rculist.h>
40#include <linux/rcupdate.h>
41#include <linux/seq_file.h>
42#include <linux/skbuff.h>
43#include <linux/slab.h>
44#include <linux/spinlock.h>
45#include <linux/stddef.h>
46#include <linux/string.h>
47#include <linux/workqueue.h>
48#include <net/dsfield.h>
49#include <net/rtnetlink.h>
50
51#include "bat_algo.h"
52#include "bridge_loop_avoidance.h"
Sven Eckelmannb706b132012-06-10 23:58:51 +020053#include "debugfs.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020054#include "distributed-arp-table.h"
55#include "gateway_client.h"
56#include "gateway_common.h"
57#include "hard-interface.h"
58#include "icmp_socket.h"
59#include "multicast.h"
60#include "network-coding.h"
61#include "originator.h"
62#include "packet.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000063#include "routing.h"
64#include "send.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000065#include "soft-interface.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000066#include "translation-table.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000067
/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;

/* packet type -> receive handler table; populated by
 * batadv_recv_handler_init() and batadv_recv_handler_register()
 */
static int (*batadv_rx_handler[256])(struct sk_buff *,
				     struct batadv_hard_iface *);

/* name of the routing algorithm to select for new soft interfaces */
char batadv_routing_algo[20] = "BATMAN_IV";

/* list of registered routing algorithms (struct batadv_algo_ops entries) */
static struct hlist_head batadv_algo_list;

unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/* single threaded workqueue ("bat_events") used for deferred batman-adv work */
struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);
Marek Lindnerffa995e2012-03-01 15:35:17 +080082
/**
 * batadv_init - module entry point
 *
 * Initializes the global lists, installs the default receive handlers,
 * lets the routing algorithms and network coding register themselves,
 * creates the event workqueue and finally hooks up the netdevice
 * notifier, the rtnl link operations, debugfs and the icmp socket.
 *
 * Return: 0 on success, -ENOMEM if the event workqueue cannot be created.
 */
static int __init batadv_init(void)
{
	INIT_LIST_HEAD(&batadv_hardif_list);
	INIT_HLIST_HEAD(&batadv_algo_list);

	/* fill the rx handler table with the drop-by-default handlers
	 * before any packet handler is registered
	 */
	batadv_recv_handler_init();

	batadv_v_init();
	batadv_iv_init();
	batadv_nc_init();

	batadv_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!batadv_event_workqueue)
		return -ENOMEM;

	batadv_socket_init();
	batadv_debugfs_init();

	register_netdevice_notifier(&batadv_hard_if_notifier);
	rtnl_link_register(&batadv_link_ops);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

	return 0;
}
110
/**
 * batadv_exit - module exit point
 *
 * Unregisters debugfs, rtnl link ops and the netdevice notifier, removes
 * all remaining hard interfaces and drains/destroys the event workqueue
 * before the module text goes away.
 */
static void __exit batadv_exit(void)
{
	batadv_debugfs_destroy();
	rtnl_link_unregister(&batadv_link_ops);
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	/* wait for pending RCU callbacks which may still reference
	 * module code or data
	 */
	rcu_barrier();
}
124
/**
 * batadv_mesh_init - initialize the per soft-interface mesh private data
 * @soft_iface: netdev struct of the soft interface
 *
 * Initializes all spinlocks and list heads of the private data, then
 * brings up the mesh components (originator table, translation table,
 * bridge loop avoidance, DAT, network coding, gateway, multicast) in
 * order. On any component failure everything already set up is torn
 * down again via batadv_mesh_free().
 *
 * Return: 0 on success or a negative error code otherwise.
 */
int batadv_mesh_init(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->tt.commit_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
#ifdef CONFIG_BATMAN_ADV_MCAST
	spin_lock_init(&bat_priv->mcast.want_lists_lock);
#endif
	spin_lock_init(&bat_priv->tvlv.container_list_lock);
	spin_lock_init(&bat_priv->tvlv.handler_list_lock);
	spin_lock_init(&bat_priv->softif_vlan_list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw.list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_unsnoopables_list);
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv4_list);
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
#endif
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_HLIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
#endif
	INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
	INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
	INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);

	ret = batadv_v_mesh_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_dat_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_nc_mesh_init(bat_priv);
	if (ret < 0)
		goto err;

	batadv_gw_init(bat_priv);
	batadv_mcast_init(bat_priv);

	atomic_set(&bat_priv->gw.reselect, 0);
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

	return 0;

err:
	/* roll back whatever was initialized so far */
	batadv_mesh_free(soft_iface);
	return ret;
}
199
/**
 * batadv_mesh_free - tear down all mesh components of a soft interface
 * @soft_iface: netdev struct of the soft interface
 *
 * The teardown order below is significant; see the inline comments.
 */
void batadv_mesh_free(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_gw_node_free(bat_priv);

	batadv_v_mesh_free(bat_priv);
	batadv_nc_mesh_free(bat_priv);
	batadv_dat_free(bat_priv);
	batadv_bla_free(bat_priv);

	batadv_mcast_free(bat_priv);

	/* Free the TT and the originator tables only after having terminated
	 * all the other depending components which may use these structures for
	 * their purposes.
	 */
	batadv_tt_free(bat_priv);

	/* Since the originator table clean up routine is accessing the TT
	 * tables as well, it has to be invoked after the TT tables have been
	 * freed and marked as empty. This ensures that no cleanup RCU callbacks
	 * accessing the TT data are scheduled for later execution.
	 */
	batadv_originator_free(bat_priv);

	batadv_gw_free(bat_priv);

	free_percpu(bat_priv->bat_counters);
	bat_priv->bat_counters = NULL;

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}
237
David S. Miller6e0895c2013-04-22 20:32:51 -0400238/**
239 * batadv_is_my_mac - check if the given mac address belongs to any of the real
240 * interfaces in the current mesh
241 * @bat_priv: the bat priv with all the soft interface information
242 * @addr: the address to check
Markus Pargmanne8ad3b12014-12-26 12:41:38 +0100243 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200244 * Return: 'true' if the mac address was found, false otherwise.
David S. Miller6e0895c2013-04-22 20:32:51 -0400245 */
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200246bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000247{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200248 const struct batadv_hard_iface *hard_iface;
Markus Pargmanne8ad3b12014-12-26 12:41:38 +0100249 bool is_my_mac = false;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000250
251 rcu_read_lock();
Sven Eckelmann3193e8f2012-05-12 02:09:42 +0200252 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
Sven Eckelmanne9a4f292012-06-03 22:19:19 +0200253 if (hard_iface->if_status != BATADV_IF_ACTIVE)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000254 continue;
255
Antonio Quartullife8a93b2013-04-03 19:10:26 +0200256 if (hard_iface->soft_iface != bat_priv->soft_iface)
257 continue;
258
Sven Eckelmann1eda58b2012-05-12 13:48:58 +0200259 if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
Markus Pargmanne8ad3b12014-12-26 12:41:38 +0100260 is_my_mac = true;
261 break;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000262 }
263 }
264 rcu_read_unlock();
Markus Pargmanne8ad3b12014-12-26 12:41:38 +0100265 return is_my_mac;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000266}
267
Marek Lindner30da63a2012-08-03 17:15:46 +0200268/**
269 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
270 * function that requires the primary interface
271 * @seq: debugfs table seq_file struct
272 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200273 * Return: primary interface if found or NULL otherwise.
Marek Lindner30da63a2012-08-03 17:15:46 +0200274 */
275struct batadv_hard_iface *
276batadv_seq_print_text_primary_if_get(struct seq_file *seq)
277{
278 struct net_device *net_dev = (struct net_device *)seq->private;
279 struct batadv_priv *bat_priv = netdev_priv(net_dev);
280 struct batadv_hard_iface *primary_if;
281
282 primary_if = batadv_primary_if_get_selected(bat_priv);
283
284 if (!primary_if) {
285 seq_printf(seq,
286 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
287 net_dev->name);
288 goto out;
289 }
290
291 if (primary_if->if_status == BATADV_IF_ACTIVE)
292 goto out;
293
294 seq_printf(seq,
295 "BATMAN mesh %s disabled - primary interface not active\n",
296 net_dev->name);
Sven Eckelmann82047ad2016-01-17 11:01:10 +0100297 batadv_hardif_put(primary_if);
Marek Lindner30da63a2012-08-03 17:15:46 +0200298 primary_if = NULL;
299
300out:
301 return primary_if;
302}
303
Simon Wunderlichc54f38c92013-07-29 17:56:44 +0200304/**
Marek Lindner411d6ed2013-05-08 13:31:59 +0800305 * batadv_max_header_len - calculate maximum encapsulation overhead for a
306 * payload packet
307 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200308 * Return: the maximum encapsulation overhead in bytes.
Marek Lindner411d6ed2013-05-08 13:31:59 +0800309 */
310int batadv_max_header_len(void)
311{
312 int header_len = 0;
313
314 header_len = max_t(int, header_len,
315 sizeof(struct batadv_unicast_packet));
316 header_len = max_t(int, header_len,
317 sizeof(struct batadv_unicast_4addr_packet));
318 header_len = max_t(int, header_len,
319 sizeof(struct batadv_bcast_packet));
320
321#ifdef CONFIG_BATMAN_ADV_NC
322 header_len = max_t(int, header_len,
323 sizeof(struct batadv_coded_packet));
324#endif
325
Marek Lindner1df0cbd2014-01-15 20:31:18 +0800326 return header_len + ETH_HLEN;
Marek Lindner411d6ed2013-05-08 13:31:59 +0800327}
328
329/**
Simon Wunderlichc54f38c92013-07-29 17:56:44 +0200330 * batadv_skb_set_priority - sets skb priority according to packet content
331 * @skb: the packet to be sent
332 * @offset: offset to the packet content
333 *
334 * This function sets a value between 256 and 263 (802.1d priority), which
335 * can be interpreted by the cfg80211 or other drivers.
336 */
337void batadv_skb_set_priority(struct sk_buff *skb, int offset)
338{
339 struct iphdr ip_hdr_tmp, *ip_hdr;
340 struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
341 struct ethhdr ethhdr_tmp, *ethhdr;
342 struct vlan_ethhdr *vhdr, vhdr_tmp;
343 u32 prio;
344
345 /* already set, do nothing */
346 if (skb->priority >= 256 && skb->priority <= 263)
347 return;
348
349 ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
350 if (!ethhdr)
351 return;
352
353 switch (ethhdr->h_proto) {
354 case htons(ETH_P_8021Q):
355 vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
356 sizeof(*vhdr), &vhdr_tmp);
357 if (!vhdr)
358 return;
359 prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
360 prio = prio >> VLAN_PRIO_SHIFT;
361 break;
362 case htons(ETH_P_IP):
363 ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
364 sizeof(*ip_hdr), &ip_hdr_tmp);
365 if (!ip_hdr)
366 return;
367 prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
368 break;
369 case htons(ETH_P_IPV6):
370 ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
371 sizeof(*ip6_hdr), &ip6_hdr_tmp);
372 if (!ip6_hdr)
373 return;
374 prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
375 break;
376 default:
377 return;
378 }
379
380 skb->priority = prio + 256;
381}
382
/**
 * batadv_recv_unhandled_packet - default rx handler for unknown packet types
 * @skb: the received packet
 * @recv_if: interface the packet was received on (unused)
 *
 * Return: NET_RX_DROP, so the caller frees the skb.
 */
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
					struct batadv_hard_iface *recv_if)
{
	return NET_RX_DROP;
}
388
/**
 * batadv_batman_skb_recv - handle incoming packets with the batman ethertype
 *  received on any active hard interface
 * @skb: the received packet
 * @dev: the net device the packet was received on
 * @ptype: packet type hook this handler was registered with; embedded in the
 *  hard interface structure
 * @orig_dev: the original receiving net device (unused)
 *
 * Return: NET_RX_SUCCESS when the packet was handed to (or dropped by) a
 * receive handler, NET_RX_DROP on early validation failure.
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batadv_priv *bat_priv;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *hard_iface;
	u8 idx;
	int ret;

	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto err_free;

	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

	if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: incompatible batman version (%i)\n",
			   batadv_ogm_packet->version);
		goto err_free;
	}

	/* reset control block to avoid left overs from previous users */
	memset(skb->cb, 0, sizeof(struct batadv_skb_cb));

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batadv_ogm_packet->packet_type;
	ret = (*batadv_rx_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}
462
/**
 * batadv_recv_handler_init - populate the packet type -> rx handler table
 *
 * All 256 slots default to batadv_recv_unhandled_packet (drop); the unicast
 * range gets its own unhandled variant, then the known packet types are
 * wired up to their specific receive handlers. Also carries the compile
 * time size checks for the on-wire packet structures.
 */
static void batadv_recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_packet;

	for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;

	/* compile time checks for sizes */
	BUILD_BUG_ON(sizeof(struct batadv_bla_claim_dst) != 6);
	BUILD_BUG_ON(sizeof(struct batadv_ogm_packet) != 24);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_header) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet_rr) != 116);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_packet) != 10);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_4addr_packet) != 18);
	BUILD_BUG_ON(sizeof(struct batadv_frag_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_bcast_packet) != 14);
	BUILD_BUG_ON(sizeof(struct batadv_coded_packet) != 46);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_tvlv_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_hdr) != 4);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_gateway_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_vlan_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);

	/* broadcast packet */
	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;

	/* unicast packets ... */
	/* unicast with 4 addresses packet */
	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
	/* unicast packet */
	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
	/* unicast tvlv packet */
	batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
	/* batman icmp packet */
	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
	/* Fragmented packets */
	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
}
506
Sven Eckelmann56303d32012-06-05 22:31:31 +0200507int
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200508batadv_recv_handler_register(u8 packet_type,
Sven Eckelmann56303d32012-06-05 22:31:31 +0200509 int (*recv_handler)(struct sk_buff *,
510 struct batadv_hard_iface *))
Marek Lindnerffa995e2012-03-01 15:35:17 +0800511{
Simon Wunderlicha1f1ac52013-04-25 10:37:23 +0200512 int (*curr)(struct sk_buff *,
513 struct batadv_hard_iface *);
514 curr = batadv_rx_handler[packet_type];
515
516 if ((curr != batadv_recv_unhandled_packet) &&
517 (curr != batadv_recv_unhandled_unicast_packet))
Marek Lindnerffa995e2012-03-01 15:35:17 +0800518 return -EBUSY;
519
Sven Eckelmannee11ad62012-05-16 20:23:19 +0200520 batadv_rx_handler[packet_type] = recv_handler;
Marek Lindnerffa995e2012-03-01 15:35:17 +0800521 return 0;
522}
523
/**
 * batadv_recv_handler_unregister - reset a packet type slot back to the
 *  drop-by-default handler
 * @packet_type: batadv packet type to unregister
 */
void batadv_recv_handler_unregister(u8 packet_type)
{
	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}
528
Sven Eckelmann56303d32012-06-05 22:31:31 +0200529static struct batadv_algo_ops *batadv_algo_get(char *name)
Marek Lindner1c280472011-11-28 17:40:17 +0800530{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200531 struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
Marek Lindner1c280472011-11-28 17:40:17 +0800532
Sasha Levinb67bfe02013-02-27 17:06:00 -0800533 hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
Marek Lindner1c280472011-11-28 17:40:17 +0800534 if (strcmp(bat_algo_ops_tmp->name, name) != 0)
535 continue;
536
537 bat_algo_ops = bat_algo_ops_tmp;
538 break;
539 }
540
541 return bat_algo_ops;
542}
543
Sven Eckelmann56303d32012-06-05 22:31:31 +0200544int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
Marek Lindner1c280472011-11-28 17:40:17 +0800545{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200546 struct batadv_algo_ops *bat_algo_ops_tmp;
Marek Lindner1c280472011-11-28 17:40:17 +0800547
Sven Eckelmannee11ad62012-05-16 20:23:19 +0200548 bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
Marek Lindner1c280472011-11-28 17:40:17 +0800549 if (bat_algo_ops_tmp) {
Sven Eckelmann86ceb362012-03-07 09:07:45 +0100550 pr_info("Trying to register already registered routing algorithm: %s\n",
551 bat_algo_ops->name);
Markus Pargmann9fb6c652014-12-26 12:41:40 +0100552 return -EEXIST;
Marek Lindner1c280472011-11-28 17:40:17 +0800553 }
554
Marek Lindner01c42242011-11-28 21:31:55 +0800555 /* all algorithms must implement all ops (for now) */
Marek Lindnerc2aca022012-02-07 17:20:45 +0800556 if (!bat_algo_ops->bat_iface_enable ||
Marek Lindner00a50072012-02-07 17:20:47 +0800557 !bat_algo_ops->bat_iface_disable ||
Marek Lindnerc3229392012-03-11 06:17:50 +0800558 !bat_algo_ops->bat_iface_update_mac ||
Marek Lindnercd8b78e2012-02-07 17:20:49 +0800559 !bat_algo_ops->bat_primary_iface_set ||
Marek Lindner01c42242011-11-28 21:31:55 +0800560 !bat_algo_ops->bat_ogm_schedule ||
Antonio Quartullia3285a82013-09-02 12:15:04 +0200561 !bat_algo_ops->bat_ogm_emit ||
Antonio Quartullic43c9812013-09-02 12:15:05 +0200562 !bat_algo_ops->bat_neigh_cmp ||
Simon Wunderlich18165f62015-08-08 02:01:50 +0200563 !bat_algo_ops->bat_neigh_is_similar_or_better) {
Marek Lindner01c42242011-11-28 21:31:55 +0800564 pr_info("Routing algo '%s' does not implement required ops\n",
565 bat_algo_ops->name);
Markus Pargmann9fb6c652014-12-26 12:41:40 +0100566 return -EINVAL;
Marek Lindner01c42242011-11-28 21:31:55 +0800567 }
568
Marek Lindner1c280472011-11-28 17:40:17 +0800569 INIT_HLIST_NODE(&bat_algo_ops->list);
Sven Eckelmannee11ad62012-05-16 20:23:19 +0200570 hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
Marek Lindner1c280472011-11-28 17:40:17 +0800571
Markus Pargmann9fb6c652014-12-26 12:41:40 +0100572 return 0;
Marek Lindner1c280472011-11-28 17:40:17 +0800573}
574
Sven Eckelmann56303d32012-06-05 22:31:31 +0200575int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
Marek Lindner1c280472011-11-28 17:40:17 +0800576{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200577 struct batadv_algo_ops *bat_algo_ops;
Marek Lindner1c280472011-11-28 17:40:17 +0800578
Sven Eckelmannee11ad62012-05-16 20:23:19 +0200579 bat_algo_ops = batadv_algo_get(name);
Marek Lindner1c280472011-11-28 17:40:17 +0800580 if (!bat_algo_ops)
Markus Pargmannf372d092014-12-26 12:41:41 +0100581 return -EINVAL;
Marek Lindner1c280472011-11-28 17:40:17 +0800582
583 bat_priv->bat_algo_ops = bat_algo_ops;
Marek Lindner1c280472011-11-28 17:40:17 +0800584
Markus Pargmannf372d092014-12-26 12:41:41 +0100585 return 0;
Marek Lindner1c280472011-11-28 17:40:17 +0800586}
587
Sven Eckelmann3193e8f2012-05-12 02:09:42 +0200588int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
Marek Lindner1c280472011-11-28 17:40:17 +0800589{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200590 struct batadv_algo_ops *bat_algo_ops;
Marek Lindner1c280472011-11-28 17:40:17 +0800591
Antonio Quartulli0c814652013-03-21 09:23:29 +0100592 seq_puts(seq, "Available routing algorithms:\n");
Marek Lindner1c280472011-11-28 17:40:17 +0800593
Sasha Levinb67bfe02013-02-27 17:06:00 -0800594 hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
Marek Lindner854d2a62015-07-17 22:25:59 +0800595 seq_printf(seq, " * %s\n", bat_algo_ops->name);
Marek Lindner1c280472011-11-28 17:40:17 +0800596 }
597
598 return 0;
599}
600
Sven Eckelmann95a066d2012-10-17 21:10:39 +0200601/**
602 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
603 * the header
604 * @skb: skb pointing to fragmented socket buffers
605 * @payload_ptr: Pointer to position inside the head buffer of the skb
606 * marking the start of the data to be CRC'ed
607 *
608 * payload_ptr must always point to an address in the skb head buffer and not to
609 * a fragment.
Sven Eckelmann7afcbbe2015-10-31 12:29:29 +0100610 *
611 * Return: big endian crc32c of the checksummed data
Sven Eckelmann95a066d2012-10-17 21:10:39 +0200612 */
613__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
614{
615 u32 crc = 0;
616 unsigned int from;
617 unsigned int to = skb->len;
618 struct skb_seq_state st;
619 const u8 *data;
620 unsigned int len;
621 unsigned int consumed = 0;
622
623 from = (unsigned int)(payload_ptr - skb->data);
624
625 skb_prepare_seq_read(skb, from, to, &st);
626 while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
627 crc = crc32c(crc, data, len);
628 consumed += len;
629 }
Sven Eckelmann95a066d2012-10-17 21:10:39 +0200630
631 return htonl(crc);
632}
633
/**
 * batadv_tvlv_handler_release - release tvlv handler from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the tvlv handler
 */
static void batadv_tvlv_handler_release(struct kref *ref)
{
	struct batadv_tvlv_handler *tvlv_handler;

	tvlv_handler = container_of(ref, struct batadv_tvlv_handler, refcount);
	kfree_rcu(tvlv_handler, rcu);
}
646
/**
 * batadv_tvlv_handler_put - decrement the tvlv handler refcounter and
 *  possibly release it
 * @tvlv_handler: the tvlv handler to free
 *
 * The release callback defers the actual free via kfree_rcu since handlers
 * are traversed under RCU protection.
 */
static void batadv_tvlv_handler_put(struct batadv_tvlv_handler *tvlv_handler)
{
	kref_put(&tvlv_handler->refcount, batadv_tvlv_handler_release);
}
656
657/**
658 * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
659 * based on the provided type and version (both need to match)
660 * @bat_priv: the bat priv with all the soft interface information
661 * @type: tvlv handler type to look for
662 * @version: tvlv handler version to look for
663 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200664 * Return: tvlv handler if found or NULL otherwise.
Marek Lindneref261572013-04-23 21:39:57 +0800665 */
666static struct batadv_tvlv_handler
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200667*batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
Marek Lindneref261572013-04-23 21:39:57 +0800668{
669 struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
670
671 rcu_read_lock();
672 hlist_for_each_entry_rcu(tvlv_handler_tmp,
673 &bat_priv->tvlv.handler_list, list) {
674 if (tvlv_handler_tmp->type != type)
675 continue;
676
677 if (tvlv_handler_tmp->version != version)
678 continue;
679
Sven Eckelmann32836f52016-01-16 10:29:49 +0100680 if (!kref_get_unless_zero(&tvlv_handler_tmp->refcount))
Marek Lindneref261572013-04-23 21:39:57 +0800681 continue;
682
683 tvlv_handler = tvlv_handler_tmp;
684 break;
685 }
686 rcu_read_unlock();
687
688 return tvlv_handler;
689}
690
691/**
Sven Eckelmannf7157dd2016-01-16 10:29:48 +0100692 * batadv_tvlv_container_release - release tvlv from lists and free
693 * @ref: kref pointer of the tvlv
694 */
695static void batadv_tvlv_container_release(struct kref *ref)
696{
697 struct batadv_tvlv_container *tvlv;
698
699 tvlv = container_of(ref, struct batadv_tvlv_container, refcount);
700 kfree(tvlv);
701}
702
/**
 * batadv_tvlv_container_put - decrement the tvlv container refcounter and
 *  possibly release it
 * @tvlv: the tvlv container to free
 *
 * Unlike tvlv handlers, containers are only accessed with
 * tvlv.container_list_lock held, so the release callback frees immediately
 * without an RCU grace period.
 */
static void batadv_tvlv_container_put(struct batadv_tvlv_container *tvlv)
{
	kref_put(&tvlv->refcount, batadv_tvlv_container_release);
}
712
713/**
714 * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
715 * list based on the provided type and version (both need to match)
716 * @bat_priv: the bat priv with all the soft interface information
717 * @type: tvlv container type to look for
718 * @version: tvlv container version to look for
719 *
720 * Has to be called with the appropriate locks being acquired
721 * (tvlv.container_list_lock).
722 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200723 * Return: tvlv container if found or NULL otherwise.
Marek Lindneref261572013-04-23 21:39:57 +0800724 */
725static struct batadv_tvlv_container
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200726*batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
Marek Lindneref261572013-04-23 21:39:57 +0800727{
728 struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
729
Sven Eckelmanndded0692015-12-20 09:04:03 +0100730 lockdep_assert_held(&bat_priv->tvlv.container_list_lock);
731
Marek Lindneref261572013-04-23 21:39:57 +0800732 hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
733 if (tvlv_tmp->tvlv_hdr.type != type)
734 continue;
735
736 if (tvlv_tmp->tvlv_hdr.version != version)
737 continue;
738
Sven Eckelmannf7157dd2016-01-16 10:29:48 +0100739 if (!kref_get_unless_zero(&tvlv_tmp->refcount))
Marek Lindneref261572013-04-23 21:39:57 +0800740 continue;
741
742 tvlv = tvlv_tmp;
743 break;
744 }
745
746 return tvlv;
747}
748
749/**
750 * batadv_tvlv_container_list_size - calculate the size of the tvlv container
751 * list entries
752 * @bat_priv: the bat priv with all the soft interface information
753 *
754 * Has to be called with the appropriate locks being acquired
755 * (tvlv.container_list_lock).
756 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200757 * Return: size of all currently registered tvlv containers in bytes.
Marek Lindneref261572013-04-23 21:39:57 +0800758 */
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200759static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
Marek Lindneref261572013-04-23 21:39:57 +0800760{
761 struct batadv_tvlv_container *tvlv;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200762 u16 tvlv_len = 0;
Marek Lindneref261572013-04-23 21:39:57 +0800763
Sven Eckelmanndded0692015-12-20 09:04:03 +0100764 lockdep_assert_held(&bat_priv->tvlv.container_list_lock);
765
Marek Lindneref261572013-04-23 21:39:57 +0800766 hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
767 tvlv_len += sizeof(struct batadv_tvlv_hdr);
768 tvlv_len += ntohs(tvlv->tvlv_hdr.len);
769 }
770
771 return tvlv_len;
772}
773
/**
 * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
 *  list
 * @bat_priv: the bat priv with all the soft interface information
 * @tvlv: the to be removed tvlv container (may be NULL)
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 */
static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
					 struct batadv_tvlv_container *tvlv)
{
	lockdep_assert_held(&bat_priv->tvlv.container_list_lock);

	/* NULL is tolerated so callers can pass the unchecked result of
	 * batadv_tvlv_container_get()
	 */
	if (!tvlv)
		return;

	hlist_del(&tvlv->list);

	/* first call to decrement the counter, second call to free:
	 * drops both the reference acquired by batadv_tvlv_container_get()
	 * in the caller and the list's own initial reference from kref_init
	 */
	batadv_tvlv_container_put(tvlv);
	batadv_tvlv_container_put(tvlv);
}
797
798/**
799 * batadv_tvlv_container_unregister - unregister tvlv container based on the
800 * provided type and version (both need to match)
801 * @bat_priv: the bat priv with all the soft interface information
802 * @type: tvlv container type to unregister
803 * @version: tvlv container type to unregister
804 */
805void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200806 u8 type, u8 version)
Marek Lindneref261572013-04-23 21:39:57 +0800807{
808 struct batadv_tvlv_container *tvlv;
809
810 spin_lock_bh(&bat_priv->tvlv.container_list_lock);
811 tvlv = batadv_tvlv_container_get(bat_priv, type, version);
Sven Eckelmann2c72d652015-06-21 14:45:14 +0200812 batadv_tvlv_container_remove(bat_priv, tvlv);
Marek Lindneref261572013-04-23 21:39:57 +0800813 spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
814}
815
816/**
817 * batadv_tvlv_container_register - register tvlv type, version and content
818 * to be propagated with each (primary interface) OGM
819 * @bat_priv: the bat priv with all the soft interface information
820 * @type: tvlv container type
821 * @version: tvlv container version
822 * @tvlv_value: tvlv container content
823 * @tvlv_value_len: tvlv container content length
824 *
825 * If a container of the same type and version was already registered the new
826 * content is going to replace the old one.
827 */
828void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200829 u8 type, u8 version,
830 void *tvlv_value, u16 tvlv_value_len)
Marek Lindneref261572013-04-23 21:39:57 +0800831{
832 struct batadv_tvlv_container *tvlv_old, *tvlv_new;
833
834 if (!tvlv_value)
835 tvlv_value_len = 0;
836
837 tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
838 if (!tvlv_new)
839 return;
840
841 tvlv_new->tvlv_hdr.version = version;
842 tvlv_new->tvlv_hdr.type = type;
843 tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);
844
845 memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
846 INIT_HLIST_NODE(&tvlv_new->list);
Sven Eckelmannf7157dd2016-01-16 10:29:48 +0100847 kref_init(&tvlv_new->refcount);
Marek Lindneref261572013-04-23 21:39:57 +0800848
849 spin_lock_bh(&bat_priv->tvlv.container_list_lock);
850 tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
Sven Eckelmann2c72d652015-06-21 14:45:14 +0200851 batadv_tvlv_container_remove(bat_priv, tvlv_old);
Marek Lindneref261572013-04-23 21:39:57 +0800852 hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
853 spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
854}
855
856/**
Antonio Quartulli3f687852014-11-02 11:29:56 +0100857 * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
Marek Lindneref261572013-04-23 21:39:57 +0800858 * requested packet size
859 * @packet_buff: packet buffer
860 * @packet_buff_len: packet buffer size
Martin Hundebølla0e28772014-07-15 09:41:08 +0200861 * @min_packet_len: requested packet minimum size
Marek Lindneref261572013-04-23 21:39:57 +0800862 * @additional_packet_len: requested additional packet size on top of minimum
863 * size
864 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200865 * Return: true of the packet buffer could be changed to the requested size,
Marek Lindneref261572013-04-23 21:39:57 +0800866 * false otherwise.
867 */
868static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
869 int *packet_buff_len,
870 int min_packet_len,
871 int additional_packet_len)
872{
873 unsigned char *new_buff;
874
875 new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);
876
877 /* keep old buffer if kmalloc should fail */
Markus Pargmann16b9ce82014-12-26 12:41:23 +0100878 if (!new_buff)
879 return false;
Marek Lindneref261572013-04-23 21:39:57 +0800880
Markus Pargmann16b9ce82014-12-26 12:41:23 +0100881 memcpy(new_buff, *packet_buff, min_packet_len);
882 kfree(*packet_buff);
883 *packet_buff = new_buff;
884 *packet_buff_len = min_packet_len + additional_packet_len;
885
886 return true;
Marek Lindneref261572013-04-23 21:39:57 +0800887}
888
/**
 * batadv_tvlv_container_ogm_append - append tvlv container content to given
 *  OGM packet buffer
 * @bat_priv: the bat priv with all the soft interface information
 * @packet_buff: ogm packet buffer
 * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
 *  content
 * @packet_min_len: ogm header size to be preserved for the OGM itself
 *
 * The ogm packet might be enlarged or shrunk depending on the current size
 * and the size of the to-be-appended tvlv containers.
 *
 * Return: size of all appended tvlv containers in bytes.
 */
u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
				     unsigned char **packet_buff,
				     int *packet_buff_len, int packet_min_len)
{
	struct batadv_tvlv_container *tvlv;
	struct batadv_tvlv_hdr *tvlv_hdr;
	u16 tvlv_value_len;
	void *tvlv_value;
	bool ret;

	/* the lock is held across size computation and copy so the container
	 * list cannot change in between
	 */
	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);

	ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
					      packet_min_len, tvlv_value_len);

	/* NOTE(review): on realloc failure the non-zero tvlv_value_len is
	 * still returned although nothing was appended - confirm callers
	 * handle this case
	 */
	if (!ret)
		goto end;

	if (!tvlv_value_len)
		goto end;

	/* tvlv data is written directly behind the preserved ogm header */
	tvlv_value = (*packet_buff) + packet_min_len;

	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
		/* copy the on-wire header, then the payload stored behind the
		 * container struct; tvlv_hdr.len is already big endian
		 */
		tvlv_hdr = tvlv_value;
		tvlv_hdr->type = tvlv->tvlv_hdr.type;
		tvlv_hdr->version = tvlv->tvlv_hdr.version;
		tvlv_hdr->len = tvlv->tvlv_hdr.len;
		tvlv_value = tvlv_hdr + 1;
		memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
		tvlv_value = (u8 *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
	}

end:
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
	return tvlv_value_len;
}
941
942/**
943 * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
944 * appropriate handlers
945 * @bat_priv: the bat priv with all the soft interface information
946 * @tvlv_handler: tvlv callback function handling the tvlv content
Sven Eckelmannc05a57f2015-08-26 10:31:51 +0200947 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
Marek Lindneref261572013-04-23 21:39:57 +0800948 * @orig_node: orig node emitting the ogm packet
949 * @src: source mac address of the unicast packet
950 * @dst: destination mac address of the unicast packet
951 * @tvlv_value: tvlv content
952 * @tvlv_value_len: tvlv content length
953 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200954 * Return: success if handler was not found or the return value of the handler
Marek Lindneref261572013-04-23 21:39:57 +0800955 * callback.
956 */
957static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
958 struct batadv_tvlv_handler *tvlv_handler,
959 bool ogm_source,
960 struct batadv_orig_node *orig_node,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200961 u8 *src, u8 *dst,
962 void *tvlv_value, u16 tvlv_value_len)
Marek Lindneref261572013-04-23 21:39:57 +0800963{
964 if (!tvlv_handler)
965 return NET_RX_SUCCESS;
966
967 if (ogm_source) {
968 if (!tvlv_handler->ogm_handler)
969 return NET_RX_SUCCESS;
970
971 if (!orig_node)
972 return NET_RX_SUCCESS;
973
974 tvlv_handler->ogm_handler(bat_priv, orig_node,
975 BATADV_NO_FLAGS,
976 tvlv_value, tvlv_value_len);
977 tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
978 } else {
979 if (!src)
980 return NET_RX_SUCCESS;
981
982 if (!dst)
983 return NET_RX_SUCCESS;
984
985 if (!tvlv_handler->unicast_handler)
986 return NET_RX_SUCCESS;
987
988 return tvlv_handler->unicast_handler(bat_priv, src,
989 dst, tvlv_value,
990 tvlv_value_len);
991 }
992
993 return NET_RX_SUCCESS;
994}
995
/**
 * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
 *  appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Return: success when processing an OGM or the return value of all called
 * handler callbacks.
 */
int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
				   bool ogm_source,
				   struct batadv_orig_node *orig_node,
				   u8 *src, u8 *dst,
				   void *tvlv_value, u16 tvlv_value_len)
{
	struct batadv_tvlv_handler *tvlv_handler;
	struct batadv_tvlv_hdr *tvlv_hdr;
	u16 tvlv_value_cont_len;
	u8 cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
	int ret = NET_RX_SUCCESS;

	/* walk the buffer record by record: header, then payload of
	 * tvlv_hdr->len bytes
	 */
	while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
		tvlv_hdr = tvlv_value;
		tvlv_value_cont_len = ntohs(tvlv_hdr->len);
		tvlv_value = tvlv_hdr + 1;
		tvlv_value_len -= sizeof(*tvlv_hdr);

		/* stop parsing on a truncated/malformed record */
		if (tvlv_value_cont_len > tvlv_value_len)
			break;

		tvlv_handler = batadv_tvlv_handler_get(bat_priv,
						       tvlv_hdr->type,
						       tvlv_hdr->version);

		/* call handler tolerates a NULL handler and ORs the unicast
		 * handler result into ret
		 */
		ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
						ogm_source, orig_node,
						src, dst, tvlv_value,
						tvlv_value_cont_len);
		if (tvlv_handler)
			batadv_tvlv_handler_put(tvlv_handler);
		tvlv_value = (u8 *)tvlv_value + tvlv_value_cont_len;
		tvlv_value_len -= tvlv_value_cont_len;
	}

	if (!ogm_source)
		return ret;

	/* OGM post-pass: tell handlers with the CIFNOTFND flag that their
	 * tvlv was absent from this OGM, then reset the per-OGM CALLED flag
	 * for the next round
	 */
	rcu_read_lock();
	hlist_for_each_entry_rcu(tvlv_handler,
				 &bat_priv->tvlv.handler_list, list) {
		if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
		    !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
			tvlv_handler->ogm_handler(bat_priv, orig_node,
						  cifnotfound, NULL, 0);

		tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
	}
	rcu_read_unlock();

	return NET_RX_SUCCESS;
}
1062
1063/**
1064 * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
1065 * handlers
1066 * @bat_priv: the bat priv with all the soft interface information
1067 * @batadv_ogm_packet: ogm packet containing the tvlv containers
1068 * @orig_node: orig node emitting the ogm packet
1069 */
1070void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
1071 struct batadv_ogm_packet *batadv_ogm_packet,
1072 struct batadv_orig_node *orig_node)
1073{
1074 void *tvlv_value;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001075 u16 tvlv_value_len;
Marek Lindneref261572013-04-23 21:39:57 +08001076
1077 if (!batadv_ogm_packet)
1078 return;
1079
1080 tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
1081 if (!tvlv_value_len)
1082 return;
1083
1084 tvlv_value = batadv_ogm_packet + 1;
1085
1086 batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
1087 tvlv_value, tvlv_value_len);
1088}
1089
1090/**
1091 * batadv_tvlv_handler_register - register tvlv handler based on the provided
1092 * type and version (both need to match) for ogm tvlv payload and/or unicast
1093 * payload
1094 * @bat_priv: the bat priv with all the soft interface information
1095 * @optr: ogm tvlv handler callback function. This function receives the orig
1096 * node, flags and the tvlv content as argument to process.
1097 * @uptr: unicast tvlv handler callback function. This function receives the
1098 * source & destination of the unicast packet as well as the tvlv content
1099 * to process.
1100 * @type: tvlv handler type to be registered
1101 * @version: tvlv handler version to be registered
1102 * @flags: flags to enable or disable TVLV API behavior
1103 */
1104void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
1105 void (*optr)(struct batadv_priv *bat_priv,
1106 struct batadv_orig_node *orig,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001107 u8 flags,
Marek Lindneref261572013-04-23 21:39:57 +08001108 void *tvlv_value,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001109 u16 tvlv_value_len),
Marek Lindneref261572013-04-23 21:39:57 +08001110 int (*uptr)(struct batadv_priv *bat_priv,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001111 u8 *src, u8 *dst,
Marek Lindneref261572013-04-23 21:39:57 +08001112 void *tvlv_value,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001113 u16 tvlv_value_len),
1114 u8 type, u8 version, u8 flags)
Marek Lindneref261572013-04-23 21:39:57 +08001115{
1116 struct batadv_tvlv_handler *tvlv_handler;
1117
1118 tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
1119 if (tvlv_handler) {
Sven Eckelmannba610042016-01-17 11:01:19 +01001120 batadv_tvlv_handler_put(tvlv_handler);
Marek Lindneref261572013-04-23 21:39:57 +08001121 return;
1122 }
1123
1124 tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
1125 if (!tvlv_handler)
1126 return;
1127
1128 tvlv_handler->ogm_handler = optr;
1129 tvlv_handler->unicast_handler = uptr;
1130 tvlv_handler->type = type;
1131 tvlv_handler->version = version;
1132 tvlv_handler->flags = flags;
Sven Eckelmann32836f52016-01-16 10:29:49 +01001133 kref_init(&tvlv_handler->refcount);
Marek Lindneref261572013-04-23 21:39:57 +08001134 INIT_HLIST_NODE(&tvlv_handler->list);
1135
1136 spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
1137 hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
1138 spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
1139}
1140
/**
 * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
 *  provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to be unregistered
 * @version: tvlv handler version to be unregistered
 */
void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
				    u8 type, u8 version)
{
	struct batadv_tvlv_handler *tvlv_handler;

	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
	if (!tvlv_handler)
		return;

	/* drop the reference acquired by batadv_tvlv_handler_get() above */
	batadv_tvlv_handler_put(tvlv_handler);
	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
	hlist_del_rcu(&tvlv_handler->list);
	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
	/* drop the list's reference; the release callback defers the free
	 * until after an RCU grace period (kfree_rcu)
	 */
	batadv_tvlv_handler_put(tvlv_handler);
}
1163
/**
 * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
 *  specified host
 * @bat_priv: the bat priv with all the soft interface information
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @type: tvlv type
 * @version: tvlv version
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 */
void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
			      u8 *dst, u8 type, u8 version,
			      void *tvlv_value, u16 tvlv_value_len)
{
	struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
	struct batadv_tvlv_hdr *tvlv_hdr;
	struct batadv_orig_node *orig_node;
	struct sk_buff *skb;
	unsigned char *tvlv_buff;
	unsigned int tvlv_len;
	ssize_t hdr_len = sizeof(*unicast_tvlv_packet);

	orig_node = batadv_orig_hash_find(bat_priv, dst);
	if (!orig_node)
		return;

	/* total tvlv part: one on-wire header plus the payload */
	tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;

	skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
	if (!skb)
		goto out;

	skb->priority = TC_PRIO_CONTROL;
	/* leave headroom for the ethernet header added further down the
	 * transmit path
	 */
	skb_reserve(skb, ETH_HLEN);
	tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
	unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
	unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
	unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
	unicast_tvlv_packet->ttl = BATADV_TTL;
	unicast_tvlv_packet->reserved = 0;
	unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
	unicast_tvlv_packet->align = 0;
	ether_addr_copy(unicast_tvlv_packet->src, src);
	ether_addr_copy(unicast_tvlv_packet->dst, dst);

	/* tvlv header and payload follow directly behind the packet header */
	tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
	tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
	tvlv_hdr->version = version;
	tvlv_hdr->type = type;
	tvlv_hdr->len = htons(tvlv_value_len);
	tvlv_buff += sizeof(*tvlv_hdr);
	memcpy(tvlv_buff, tvlv_value, tvlv_value_len);

	/* on NET_XMIT_DROP the skb was not consumed and must be freed here */
	if (batadv_send_skb_to_orig(skb, orig_node, NULL) == NET_XMIT_DROP)
		kfree_skb(skb);
out:
	batadv_orig_node_put(orig_node);
}
1223
Antonio Quartullic018ad32013-06-04 12:11:39 +02001224/**
1225 * batadv_get_vid - extract the VLAN identifier from skb if any
1226 * @skb: the buffer containing the packet
1227 * @header_len: length of the batman header preceding the ethernet header
1228 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001229 * Return: VID with the BATADV_VLAN_HAS_TAG flag when the packet embedded in the
1230 * skb is vlan tagged. Otherwise BATADV_NO_FLAGS.
Antonio Quartullic018ad32013-06-04 12:11:39 +02001231 */
1232unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
1233{
1234 struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
1235 struct vlan_ethhdr *vhdr;
1236 unsigned short vid;
1237
1238 if (ethhdr->h_proto != htons(ETH_P_8021Q))
1239 return BATADV_NO_FLAGS;
1240
1241 if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
1242 return BATADV_NO_FLAGS;
1243
1244 vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
1245 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
1246 vid |= BATADV_VLAN_HAS_TAG;
1247
1248 return vid;
1249}
1250
Antonio Quartullieceb22a2013-11-16 12:03:51 +01001251/**
1252 * batadv_vlan_ap_isola_get - return the AP isolation status for the given vlan
1253 * @bat_priv: the bat priv with all the soft interface information
1254 * @vid: the VLAN identifier for which the AP isolation attributed as to be
1255 * looked up
1256 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001257 * Return: true if AP isolation is on for the VLAN idenfied by vid, false
Antonio Quartullieceb22a2013-11-16 12:03:51 +01001258 * otherwise
1259 */
1260bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
1261{
1262 bool ap_isolation_enabled = false;
1263 struct batadv_softif_vlan *vlan;
1264
1265 /* if the AP isolation is requested on a VLAN, then check for its
1266 * setting in the proper VLAN private data structure
1267 */
1268 vlan = batadv_softif_vlan_get(bat_priv, vid);
1269 if (vlan) {
1270 ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
Sven Eckelmann9c3bf082016-01-17 11:01:21 +01001271 batadv_softif_vlan_put(vlan);
Antonio Quartullieceb22a2013-11-16 12:03:51 +01001272 }
1273
1274 return ap_isolation_enabled;
1275}
1276
/**
 * batadv_param_set_ra - validate and store the "routing_algo" module parameter
 * @val: routing algorithm name as supplied by the module parameter core
 * @kp: the kernel_param being set
 *
 * Return: 0 on success, -EINVAL if the named algorithm is not registered.
 */
static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
	struct batadv_algo_ops *bat_algo_ops;
	/* NOTE(review): const is cast away and val edited in place below -
	 * presumably the parameter buffer is writable; confirm against the
	 * kernel_param core before changing this
	 */
	char *algo_name = (char *)val;
	size_t name_len = strlen(algo_name);

	/* strip a single trailing newline (e.g. from a sysfs write) */
	if (name_len > 0 && algo_name[name_len - 1] == '\n')
		algo_name[name_len - 1] = '\0';

	bat_algo_ops = batadv_algo_get(algo_name);
	if (!bat_algo_ops) {
		pr_err("Routing algorithm '%s' is not supported\n", algo_name);
		return -EINVAL;
	}

	return param_set_copystring(algo_name, kp);
}
1294
/* ops for the "routing_algo" parameter: validating setter, plain string get */
static const struct kernel_param_ops batadv_param_ops_ra = {
	.set = batadv_param_set_ra,
	.get = param_get_string,
};

/* storage backing the "routing_algo" module parameter string */
static struct kparam_string batadv_param_string_ra = {
	.maxlen = sizeof(batadv_routing_algo),
	.string = batadv_routing_algo,
};
1304
/* expose "routing_algo" as a world-readable, root-writable (0644) parameter
 * going through the validating ops above
 */
module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
		0644);
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);