/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/crc32c.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>
#include "main.h"
#include "sysfs.h"
#include "debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
#include "soft-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "multicast.h"
#include "gateway_common.h"
#include "hash.h"
#include "bat_algo.h"
#include "network-coding.h"
#include "fragmentation.h"

/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;
static int (*batadv_rx_handler[256])(struct sk_buff *,
				     struct batadv_hard_iface *);
char batadv_routing_algo[20] = "BATMAN_IV";
static struct hlist_head batadv_algo_list;

unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);

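/**
 * batadv_init - module entry point
 *
 * Initializes the global hardif and routing algorithm lists, the receive
 * handler table, the B.A.T.M.A.N. IV algorithm and network coding support,
 * creates the "bat_events" workqueue and registers the socket, debugfs,
 * netdevice notifier and rtnl link hooks.
 *
 * Returns 0 on success or -ENOMEM if the event workqueue cannot be created.
 */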
static int __init batadv_init(void)
{
	INIT_LIST_HEAD(&batadv_hardif_list);
	INIT_HLIST_HEAD(&batadv_algo_list);

	batadv_recv_handler_init();

	batadv_iv_init();
	batadv_nc_init();

	batadv_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!batadv_event_workqueue)
		return -ENOMEM;

	batadv_socket_init();
	batadv_debugfs_init();

	register_netdevice_notifier(&batadv_hard_if_notifier);
	rtnl_link_register(&batadv_link_ops);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

	return 0;
}

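/**
 * batadv_exit - module exit point
 *
 * Tears down debugfs, unregisters the rtnl link operations and the netdevice
 * notifier, removes all remaining hard interfaces, flushes and destroys the
 * event workqueue and waits for pending RCU callbacks to complete.
 */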
static void __exit batadv_exit(void)
{
	batadv_debugfs_destroy();
	rtnl_link_unregister(&batadv_link_ops);
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	rcu_barrier();
}

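/**
 * batadv_mesh_init - initialize all mesh structures of a soft interface
 * @soft_iface: netdev struct of the soft interface
 *
 * Initializes the locks and lists of the mesh private data and brings up the
 * originator, translation table, bridge loop avoidance, DAT, network coding,
 * gateway and multicast components. On failure the already initialized
 * components are torn down again via batadv_mesh_free().
 *
 * Returns 0 on success or a negative error code otherwise.
 */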
int batadv_mesh_init(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->tt.commit_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
#ifdef CONFIG_BATMAN_ADV_MCAST
	spin_lock_init(&bat_priv->mcast.want_lists_lock);
#endif
	spin_lock_init(&bat_priv->tvlv.container_list_lock);
	spin_lock_init(&bat_priv->tvlv.handler_list_lock);
	spin_lock_init(&bat_priv->softif_vlan_list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw.list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_unsnoopables_list);
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv4_list);
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
#endif
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_LIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
#endif
	INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
	INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
	INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_dat_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_nc_mesh_init(bat_priv);
	if (ret < 0)
		goto err;

	batadv_gw_init(bat_priv);
	batadv_mcast_init(bat_priv);

	atomic_set(&bat_priv->gw.reselect, 0);
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

	return 0;

err:
	batadv_mesh_free(soft_iface);
	return ret;
}

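/**
 * batadv_mesh_free - free all mesh structures of a soft interface
 * @soft_iface: netdev struct of the soft interface
 *
 * Counterpart of batadv_mesh_init(): purges outstanding packets and shuts
 * down all mesh components in reverse dependency order before marking the
 * mesh as inactive.
 */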
void batadv_mesh_free(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_gw_node_purge(bat_priv);
	batadv_nc_mesh_free(bat_priv);
	batadv_dat_free(bat_priv);
	batadv_bla_free(bat_priv);

	batadv_mcast_free(bat_priv);

	/* Free the TT and the originator tables only after having terminated
	 * all the other dependent components which may use these structures
	 * for their purposes.
	 */
	batadv_tt_free(bat_priv);

	/* Since the originator table clean up routine is accessing the TT
	 * tables as well, it has to be invoked after the TT tables have been
	 * freed and marked as empty. This ensures that no cleanup RCU callbacks
	 * accessing the TT data are scheduled for later execution.
	 */
	batadv_originator_free(bat_priv);

	batadv_gw_free(bat_priv);

	free_percpu(bat_priv->bat_counters);
	bat_priv->bat_counters = NULL;

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}

/**
 * batadv_is_my_mac - check if the given mac address belongs to any of the real
 * interfaces in the current mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address to check
 */
int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
{
	const struct batadv_hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
			rcu_read_unlock();
			return 1;
		}
	}
	rcu_read_unlock();
	return 0;
}

/**
 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
 * function that requires the primary interface
 * @seq: debugfs table seq_file struct
 *
 * Returns primary interface if found or NULL otherwise.
 */
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (!primary_if) {
		seq_printf(seq,
			   "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
			   net_dev->name);
		goto out;
	}

	if (primary_if->if_status == BATADV_IF_ACTIVE)
		goto out;

	seq_printf(seq,
		   "BATMAN mesh %s disabled - primary interface not active\n",
		   net_dev->name);
	batadv_hardif_free_ref(primary_if);
	primary_if = NULL;

out:
	return primary_if;
}

/**
 * batadv_max_header_len - calculate maximum encapsulation overhead for a
 * payload packet
 *
 * Return the maximum encapsulation overhead in bytes.
 */
int batadv_max_header_len(void)
{
	int header_len = 0;

	header_len = max_t(int, header_len,
			   sizeof(struct batadv_unicast_packet));
	header_len = max_t(int, header_len,
			   sizeof(struct batadv_unicast_4addr_packet));
	header_len = max_t(int, header_len,
			   sizeof(struct batadv_bcast_packet));

#ifdef CONFIG_BATMAN_ADV_NC
	header_len = max_t(int, header_len,
			   sizeof(struct batadv_coded_packet));
#endif

	return header_len + ETH_HLEN;
}

/**
 * batadv_skb_set_priority - sets skb priority according to packet content
 * @skb: the packet to be sent
 * @offset: offset to the packet content
 *
 * This function sets a value between 256 and 263 (802.1d priority), which
 * can be interpreted by the cfg80211 or other drivers.
 */
void batadv_skb_set_priority(struct sk_buff *skb, int offset)
{
	struct iphdr ip_hdr_tmp, *ip_hdr;
	struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
	struct ethhdr ethhdr_tmp, *ethhdr;
	struct vlan_ethhdr *vhdr, vhdr_tmp;
	u32 prio;

	/* already set, do nothing */
	if (skb->priority >= 256 && skb->priority <= 263)
		return;

	ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
	if (!ethhdr)
		return;

	switch (ethhdr->h_proto) {
	case htons(ETH_P_8021Q):
		vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
					  sizeof(*vhdr), &vhdr_tmp);
		if (!vhdr)
			return;
		prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
		prio = prio >> VLAN_PRIO_SHIFT;
		break;
	case htons(ETH_P_IP):
		ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					    sizeof(*ip_hdr), &ip_hdr_tmp);
		if (!ip_hdr)
			return;
		prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
		break;
	case htons(ETH_P_IPV6):
		ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					     sizeof(*ip6_hdr), &ip6_hdr_tmp);
		if (!ip6_hdr)
			return;
		prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
		break;
	default:
		return;
	}

	skb->priority = prio + 256;
}

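/**
 * batadv_recv_unhandled_packet - default handler for unknown packet types
 * @skb: the received packet
 * @recv_if: the interface the packet was received on
 *
 * Always returns NET_RX_DROP so that the caller frees the skb.
 */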
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
					struct batadv_hard_iface *recv_if)
{
	return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batadv_priv *bat_priv;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto err_free;

	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

	if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: incompatible batman version (%i)\n",
			   batadv_ogm_packet->version);
		goto err_free;
	}

	/* reset control block to avoid left overs from previous users */
	memset(skb->cb, 0, sizeof(struct batadv_skb_cb));

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batadv_ogm_packet->packet_type;
	ret = (*batadv_rx_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}

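/**
 * batadv_recv_handler_init - populate the receive handler table
 *
 * Initializes every entry of batadv_rx_handler with the "unhandled" stubs and
 * then installs the handlers for the packet types dealt with in this file
 * (broadcast, unicast, unicast 4addr, unicast tvlv, icmp and fragments). Also
 * holds the compile time size checks for the on-wire packet formats.
 */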
static void batadv_recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_packet;

	for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;

	/* compile time checks for sizes */
	BUILD_BUG_ON(sizeof(struct batadv_bla_claim_dst) != 6);
	BUILD_BUG_ON(sizeof(struct batadv_ogm_packet) != 24);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_header) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet_rr) != 116);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_packet) != 10);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_4addr_packet) != 18);
	BUILD_BUG_ON(sizeof(struct batadv_frag_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_bcast_packet) != 14);
	BUILD_BUG_ON(sizeof(struct batadv_coded_packet) != 46);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_tvlv_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_hdr) != 4);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_gateway_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_vlan_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);

	/* broadcast packet */
	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;

	/* unicast packets ... */
	/* unicast with 4 addresses packet */
	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
	/* unicast packet */
	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
	/* unicast tvlv packet */
	batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
	/* batman icmp packet */
	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
	/* Fragmented packets */
	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
}

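/**
 * batadv_recv_handler_register - register a receive handler for a packet type
 * @packet_type: batadv packet type to register the handler for
 * @recv_handler: receive handler for the packet type
 *
 * Returns 0 on success or -EBUSY if a handler other than the default
 * "unhandled" stubs is already installed for this packet type.
 */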
int
batadv_recv_handler_register(uint8_t packet_type,
			     int (*recv_handler)(struct sk_buff *,
						 struct batadv_hard_iface *))
{
	int (*curr)(struct sk_buff *,
		    struct batadv_hard_iface *);
	curr = batadv_rx_handler[packet_type];

	if ((curr != batadv_recv_unhandled_packet) &&
	    (curr != batadv_recv_unhandled_unicast_packet))
		return -EBUSY;

	batadv_rx_handler[packet_type] = recv_handler;
	return 0;
}

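/**
 * batadv_recv_handler_unregister - unregister the handler for a packet type
 * @packet_type: batadv packet type to reset to the "unhandled" stub
 */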
void batadv_recv_handler_unregister(uint8_t packet_type)
{
	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}

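/**
 * batadv_algo_get - retrieve a routing algorithm from the algorithm list
 * @name: name of the routing algorithm to look up
 *
 * Returns the batadv_algo_ops structure of the matching algorithm or NULL if
 * no algorithm with that name has been registered.
 */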
static struct batadv_algo_ops *batadv_algo_get(char *name)
{
	struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;

	hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
		if (strcmp(bat_algo_ops_tmp->name, name) != 0)
			continue;

		bat_algo_ops = bat_algo_ops_tmp;
		break;
	}

	return bat_algo_ops;
}

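/**
 * batadv_algo_register - register a routing algorithm with batman-adv
 * @bat_algo_ops: the algorithm operations to register
 *
 * Returns 0 on success, -EEXIST if an algorithm with the same name is already
 * registered or -EINVAL if mandatory callbacks are missing.
 */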
int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
	struct batadv_algo_ops *bat_algo_ops_tmp;
	int ret;

	bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
	if (bat_algo_ops_tmp) {
		pr_info("Trying to register already registered routing algorithm: %s\n",
			bat_algo_ops->name);
		ret = -EEXIST;
		goto out;
	}

	/* all algorithms must implement all ops (for now) */
	if (!bat_algo_ops->bat_iface_enable ||
	    !bat_algo_ops->bat_iface_disable ||
	    !bat_algo_ops->bat_iface_update_mac ||
	    !bat_algo_ops->bat_primary_iface_set ||
	    !bat_algo_ops->bat_ogm_schedule ||
	    !bat_algo_ops->bat_ogm_emit ||
	    !bat_algo_ops->bat_neigh_cmp ||
	    !bat_algo_ops->bat_neigh_is_equiv_or_better) {
		pr_info("Routing algo '%s' does not implement required ops\n",
			bat_algo_ops->name);
		ret = -EINVAL;
		goto out;
	}

	INIT_HLIST_NODE(&bat_algo_ops->list);
	hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
	ret = 0;

out:
	return ret;
}

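/**
 * batadv_algo_select - select the routing algorithm to be used by a mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @name: name of the routing algorithm to select
 *
 * Returns 0 on success or -EINVAL if no algorithm with the given name has
 * been registered.
 */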
int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
{
	struct batadv_algo_ops *bat_algo_ops;
	int ret = -EINVAL;

	bat_algo_ops = batadv_algo_get(name);
	if (!bat_algo_ops)
		goto out;

	bat_priv->bat_algo_ops = bat_algo_ops;
	ret = 0;

out:
	return ret;
}

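/**
 * batadv_algo_seq_print_text - print the registered routing algorithms to a
 * debugfs seq_file
 * @seq: the seq_file to print to
 * @offset: unused seq_file callback parameter
 *
 * Always returns 0.
 */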
int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
	struct batadv_algo_ops *bat_algo_ops;

	seq_puts(seq, "Available routing algorithms:\n");

	hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
		seq_printf(seq, "%s\n", bat_algo_ops->name);
	}

	return 0;
}

/**
 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
 * the header
 * @skb: skb pointing to fragmented socket buffers
 * @payload_ptr: Pointer to position inside the head buffer of the skb
 * marking the start of the data to be CRC'ed
 *
 * payload_ptr must always point to an address in the skb head buffer and not
 * to a fragment.
 */
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
{
	u32 crc = 0;
	unsigned int from;
	unsigned int to = skb->len;
	struct skb_seq_state st;
	const u8 *data;
	unsigned int len;
	unsigned int consumed = 0;

	from = (unsigned int)(payload_ptr - skb->data);

	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		crc = crc32c(crc, data, len);
		consumed += len;
	}

	return htonl(crc);
}

/**
 * batadv_tvlv_handler_free_ref - decrement the tvlv handler refcounter and
 * possibly free it
 * @tvlv_handler: the tvlv handler to free
 */
static void
batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
{
	if (atomic_dec_and_test(&tvlv_handler->refcount))
		kfree_rcu(tvlv_handler, rcu);
}

/**
 * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
 * based on the provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to look for
 * @version: tvlv handler version to look for
 *
 * Returns tvlv handler if found or NULL otherwise.
 */
static struct batadv_tvlv_handler
*batadv_tvlv_handler_get(struct batadv_priv *bat_priv,
			 uint8_t type, uint8_t version)
{
	struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tvlv_handler_tmp,
				 &bat_priv->tvlv.handler_list, list) {
		if (tvlv_handler_tmp->type != type)
			continue;

		if (tvlv_handler_tmp->version != version)
			continue;

		if (!atomic_inc_not_zero(&tvlv_handler_tmp->refcount))
			continue;

		tvlv_handler = tvlv_handler_tmp;
		break;
	}
	rcu_read_unlock();

	return tvlv_handler;
}

/**
 * batadv_tvlv_container_free_ref - decrement the tvlv container refcounter and
 * possibly free it
 * @tvlv: the tvlv container to free
 */
static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
{
	if (atomic_dec_and_test(&tvlv->refcount))
		kfree(tvlv);
}

/**
 * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
 * list based on the provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type to look for
 * @version: tvlv container version to look for
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 *
 * Returns tvlv container if found or NULL otherwise.
 */
static struct batadv_tvlv_container
*batadv_tvlv_container_get(struct batadv_priv *bat_priv,
			   uint8_t type, uint8_t version)
{
	struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;

	hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
		if (tvlv_tmp->tvlv_hdr.type != type)
			continue;

		if (tvlv_tmp->tvlv_hdr.version != version)
			continue;

		if (!atomic_inc_not_zero(&tvlv_tmp->refcount))
			continue;

		tvlv = tvlv_tmp;
		break;
	}

	return tvlv;
}

/**
 * batadv_tvlv_container_list_size - calculate the size of the tvlv container
 * list entries
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 *
 * Returns size of all currently registered tvlv containers in bytes.
 */
static uint16_t batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
{
	struct batadv_tvlv_container *tvlv;
	uint16_t tvlv_len = 0;

	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
		tvlv_len += sizeof(struct batadv_tvlv_hdr);
		tvlv_len += ntohs(tvlv->tvlv_hdr.len);
	}

	return tvlv_len;
}

/**
 * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
 * list
 * @tvlv: the to be removed tvlv container
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 */
static void batadv_tvlv_container_remove(struct batadv_tvlv_container *tvlv)
{
	if (!tvlv)
		return;

	hlist_del(&tvlv->list);

	/* first call to decrement the counter, second call to free */
	batadv_tvlv_container_free_ref(tvlv);
	batadv_tvlv_container_free_ref(tvlv);
}

/**
 * batadv_tvlv_container_unregister - unregister tvlv container based on the
 * provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type to unregister
 * @version: tvlv container version to unregister
 */
void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
				      uint8_t type, uint8_t version)
{
	struct batadv_tvlv_container *tvlv;

	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv = batadv_tvlv_container_get(bat_priv, type, version);
	batadv_tvlv_container_remove(tvlv);
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}

/**
 * batadv_tvlv_container_register - register tvlv type, version and content
 * to be propagated with each (primary interface) OGM
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type
 * @version: tvlv container version
 * @tvlv_value: tvlv container content
 * @tvlv_value_len: tvlv container content length
 *
 * If a container of the same type and version was already registered the new
 * content is going to replace the old one.
 */
void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
				    uint8_t type, uint8_t version,
				    void *tvlv_value, uint16_t tvlv_value_len)
{
	struct batadv_tvlv_container *tvlv_old, *tvlv_new;

	if (!tvlv_value)
		tvlv_value_len = 0;

	tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
	if (!tvlv_new)
		return;

	tvlv_new->tvlv_hdr.version = version;
	tvlv_new->tvlv_hdr.type = type;
	tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);

	memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
	INIT_HLIST_NODE(&tvlv_new->list);
	atomic_set(&tvlv_new->refcount, 1);

	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
	batadv_tvlv_container_remove(tvlv_old);
	hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}

/**
 * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
 * requested packet size
 * @packet_buff: packet buffer
 * @packet_buff_len: packet buffer size
 * @min_packet_len: requested packet minimum size
 * @additional_packet_len: requested additional packet size on top of minimum
 * size
 *
 * Returns true if the packet buffer could be changed to the requested size,
 * false otherwise.
 */
static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
					    int *packet_buff_len,
					    int min_packet_len,
					    int additional_packet_len)
{
	unsigned char *new_buff;

	new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, *packet_buff, min_packet_len);
		kfree(*packet_buff);
		*packet_buff = new_buff;
		*packet_buff_len = min_packet_len + additional_packet_len;
		return true;
	}

	return false;
}

/**
 * batadv_tvlv_container_ogm_append - append tvlv container content to given
 * OGM packet buffer
 * @bat_priv: the bat priv with all the soft interface information
 * @packet_buff: ogm packet buffer
 * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
 * content
 * @packet_min_len: ogm header size to be preserved for the OGM itself
 *
 * The ogm packet might be enlarged or shrunk depending on the current size
 * and the size of the to-be-appended tvlv containers.
 *
 * Returns size of all appended tvlv containers in bytes.
 */
uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
					  unsigned char **packet_buff,
					  int *packet_buff_len,
					  int packet_min_len)
{
	struct batadv_tvlv_container *tvlv;
	struct batadv_tvlv_hdr *tvlv_hdr;
	uint16_t tvlv_value_len;
	void *tvlv_value;
	bool ret;

	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);

	ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
					      packet_min_len, tvlv_value_len);

	if (!ret)
		goto end;

	if (!tvlv_value_len)
		goto end;

	tvlv_value = (*packet_buff) + packet_min_len;

	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
		tvlv_hdr = tvlv_value;
		tvlv_hdr->type = tvlv->tvlv_hdr.type;
		tvlv_hdr->version = tvlv->tvlv_hdr.version;
		tvlv_hdr->len = tvlv->tvlv_hdr.len;
		tvlv_value = tvlv_hdr + 1;
		memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
		tvlv_value = (uint8_t *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
	}

end:
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
	return tvlv_value_len;
}

/**
 * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
 * appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @tvlv_handler: tvlv callback function handling the tvlv content
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Returns success if handler was not found or the return value of the handler
 * callback.
 */
static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
				    struct batadv_tvlv_handler *tvlv_handler,
				    bool ogm_source,
				    struct batadv_orig_node *orig_node,
				    uint8_t *src, uint8_t *dst,
				    void *tvlv_value, uint16_t tvlv_value_len)
{
	if (!tvlv_handler)
		return NET_RX_SUCCESS;

	if (ogm_source) {
		if (!tvlv_handler->ogm_handler)
			return NET_RX_SUCCESS;

		if (!orig_node)
			return NET_RX_SUCCESS;

		tvlv_handler->ogm_handler(bat_priv, orig_node,
					  BATADV_NO_FLAGS,
					  tvlv_value, tvlv_value_len);
		tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
	} else {
		if (!src)
			return NET_RX_SUCCESS;

		if (!dst)
			return NET_RX_SUCCESS;

		if (!tvlv_handler->unicast_handler)
			return NET_RX_SUCCESS;

		return tvlv_handler->unicast_handler(bat_priv, src,
						     dst, tvlv_value,
						     tvlv_value_len);
	}

	return NET_RX_SUCCESS;
}

/**
 * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
 * appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Returns success when processing an OGM or the return value of all called
 * handler callbacks.
 */
int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
				   bool ogm_source,
				   struct batadv_orig_node *orig_node,
				   uint8_t *src, uint8_t *dst,
				   void *tvlv_value, uint16_t tvlv_value_len)
{
	struct batadv_tvlv_handler *tvlv_handler;
	struct batadv_tvlv_hdr *tvlv_hdr;
	uint16_t tvlv_value_cont_len;
	uint8_t cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
	int ret = NET_RX_SUCCESS;

	while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
		tvlv_hdr = tvlv_value;
		tvlv_value_cont_len = ntohs(tvlv_hdr->len);
		tvlv_value = tvlv_hdr + 1;
		tvlv_value_len -= sizeof(*tvlv_hdr);

		if (tvlv_value_cont_len > tvlv_value_len)
			break;

		tvlv_handler = batadv_tvlv_handler_get(bat_priv,
						       tvlv_hdr->type,
						       tvlv_hdr->version);

		ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
						ogm_source, orig_node,
						src, dst, tvlv_value,
						tvlv_value_cont_len);
		if (tvlv_handler)
			batadv_tvlv_handler_free_ref(tvlv_handler);
		tvlv_value = (uint8_t *)tvlv_value + tvlv_value_cont_len;
		tvlv_value_len -= tvlv_value_cont_len;
	}

	if (!ogm_source)
		return ret;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tvlv_handler,
				 &bat_priv->tvlv.handler_list, list) {
		if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
		    !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
			tvlv_handler->ogm_handler(bat_priv, orig_node,
						  cifnotfound, NULL, 0);

		tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
	}
	rcu_read_unlock();

	return NET_RX_SUCCESS;
}

/**
 * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
 * handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @batadv_ogm_packet: ogm packet containing the tvlv containers
 * @orig_node: orig node emitting the ogm packet
 */
void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
			     struct batadv_ogm_packet *batadv_ogm_packet,
			     struct batadv_orig_node *orig_node)
{
	void *tvlv_value;
	uint16_t tvlv_value_len;

	if (!batadv_ogm_packet)
		return;

	tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
	if (!tvlv_value_len)
		return;

	tvlv_value = batadv_ogm_packet + 1;

	batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
				       tvlv_value, tvlv_value_len);
}

/**
 * batadv_tvlv_handler_register - register tvlv handler based on the provided
 * type and version (both need to match) for ogm tvlv payload and/or unicast
 * payload
 * @bat_priv: the bat priv with all the soft interface information
 * @optr: ogm tvlv handler callback function. This function receives the orig
 * node, flags and the tvlv content as argument to process.
 * @uptr: unicast tvlv handler callback function. This function receives the
 * source & destination of the unicast packet as well as the tvlv content
 * to process.
 * @type: tvlv handler type to be registered
 * @version: tvlv handler version to be registered
 * @flags: flags to enable or disable TVLV API behavior
 */
void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
				  void (*optr)(struct batadv_priv *bat_priv,
					       struct batadv_orig_node *orig,
					       uint8_t flags,
					       void *tvlv_value,
					       uint16_t tvlv_value_len),
				  int (*uptr)(struct batadv_priv *bat_priv,
					      uint8_t *src, uint8_t *dst,
					      void *tvlv_value,
					      uint16_t tvlv_value_len),
				  uint8_t type, uint8_t version, uint8_t flags)
{
	struct batadv_tvlv_handler *tvlv_handler;

	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
	if (tvlv_handler) {
		batadv_tvlv_handler_free_ref(tvlv_handler);
		return;
	}

	tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
	if (!tvlv_handler)
		return;

	tvlv_handler->ogm_handler = optr;
	tvlv_handler->unicast_handler = uptr;
	tvlv_handler->type = type;
	tvlv_handler->version = version;
	tvlv_handler->flags = flags;
	atomic_set(&tvlv_handler->refcount, 1);
	INIT_HLIST_NODE(&tvlv_handler->list);

	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
	hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
}

/**
 * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
 * provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to be unregistered
 * @version: tvlv handler version to be unregistered
 */
void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
				    uint8_t type, uint8_t version)
{
	struct batadv_tvlv_handler *tvlv_handler;

	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
	if (!tvlv_handler)
		return;

	batadv_tvlv_handler_free_ref(tvlv_handler);
	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
	hlist_del_rcu(&tvlv_handler->list);
	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
	batadv_tvlv_handler_free_ref(tvlv_handler);
}

/**
 * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
 * specified host
 * @bat_priv: the bat priv with all the soft interface information
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @type: tvlv type
 * @version: tvlv version
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 */
void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
			      uint8_t *dst, uint8_t type, uint8_t version,
			      void *tvlv_value, uint16_t tvlv_value_len)
{
	struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
	struct batadv_tvlv_hdr *tvlv_hdr;
	struct batadv_orig_node *orig_node;
	struct sk_buff *skb = NULL;
	unsigned char *tvlv_buff;
	unsigned int tvlv_len;
	ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
	bool ret = false;

	orig_node = batadv_orig_hash_find(bat_priv, dst);
	if (!orig_node)
		goto out;

	tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;

	skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
	if (!skb)
		goto out;

	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, ETH_HLEN);
	tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
	unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
	unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
	unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
	unicast_tvlv_packet->ttl = BATADV_TTL;
	unicast_tvlv_packet->reserved = 0;
	unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
	unicast_tvlv_packet->align = 0;
	ether_addr_copy(unicast_tvlv_packet->src, src);
	ether_addr_copy(unicast_tvlv_packet->dst, dst);

	tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
	tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
	tvlv_hdr->version = version;
	tvlv_hdr->type = type;
	tvlv_hdr->len = htons(tvlv_value_len);
	tvlv_buff += sizeof(*tvlv_hdr);
	memcpy(tvlv_buff, tvlv_value, tvlv_value_len);

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = true;

out:
	if (skb && !ret)
		kfree_skb(skb);
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
}

/**
 * batadv_get_vid - extract the VLAN identifier from skb if any
 * @skb: the buffer containing the packet
 * @header_len: length of the batman header preceding the ethernet header
 *
 * If the packet embedded in the skb is vlan tagged this function returns the
 * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned.
 */
unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
{
	struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
	struct vlan_ethhdr *vhdr;
	unsigned short vid;

	if (ethhdr->h_proto != htons(ETH_P_8021Q))
		return BATADV_NO_FLAGS;

	if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
		return BATADV_NO_FLAGS;

	vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
	vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
	vid |= BATADV_VLAN_HAS_TAG;

	return vid;
}

/**
 * batadv_vlan_ap_isola_get - return the AP isolation status for the given vlan
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the VLAN identifier for which the AP isolation attribute has to be
 * looked up
 *
 * Returns true if AP isolation is on for the VLAN identified by vid, false
 * otherwise
 */
bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
{
	bool ap_isolation_enabled = false;
	struct batadv_softif_vlan *vlan;

	/* if the AP isolation is requested on a VLAN, then check for its
	 * setting in the proper VLAN private data structure
	 */
	vlan = batadv_softif_vlan_get(bat_priv, vid);
	if (vlan) {
		ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
		batadv_softif_vlan_free_ref(vlan);
	}

	return ap_isolation_enabled;
}

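/**
 * batadv_param_set_ra - validate and set the routing_algo module parameter
 * @val: the name of the routing algorithm selected by the user
 * @kp: the kernel_param struct describing the parameter
 *
 * Strips a trailing newline and rejects names of algorithms that have not
 * been registered.
 *
 * Returns -EINVAL if the algorithm is unknown, otherwise the result of
 * param_set_copystring().
 */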
static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
	struct batadv_algo_ops *bat_algo_ops;
	char *algo_name = (char *)val;
	size_t name_len = strlen(algo_name);

	if (name_len > 0 && algo_name[name_len - 1] == '\n')
		algo_name[name_len - 1] = '\0';

	bat_algo_ops = batadv_algo_get(algo_name);
	if (!bat_algo_ops) {
		pr_err("Routing algorithm '%s' is not supported\n", algo_name);
		return -EINVAL;
	}

	return param_set_copystring(algo_name, kp);
}

static const struct kernel_param_ops batadv_param_ops_ra = {
	.set = batadv_param_set_ra,
	.get = param_get_string,
};

static struct kparam_string batadv_param_string_ra = {
	.maxlen = sizeof(batadv_routing_algo),
	.string = batadv_routing_algo,
};

module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
		0644);
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);