/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/crc32c.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>
#include "main.h"
#include "sysfs.h"
#include "debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
#include "soft-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "multicast.h"
#include "gateway_common.h"
#include "hash.h"
#include "bat_algo.h"
#include "network-coding.h"
#include "fragmentation.h"


/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;
static int (*batadv_rx_handler[256])(struct sk_buff *,
				     struct batadv_hard_iface *);
char batadv_routing_algo[20] = "BATMAN_IV";
static struct hlist_head batadv_algo_list;

unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);

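/**
 * batadv_init - register the batman-adv module with the kernel
 *
 * Sets up the global lists and the receive handler table, initializes the
 * B.A.T.M.A.N. IV algorithm, network coding, the ICMP socket and debugfs,
 * allocates the "bat_events" workqueue and registers the netdevice notifier
 * and the rtnl link operations.
 *
 * Returns 0 on success or -ENOMEM if the event workqueue cannot be created.
 */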
static int __init batadv_init(void)
{
	INIT_LIST_HEAD(&batadv_hardif_list);
	INIT_HLIST_HEAD(&batadv_algo_list);

	batadv_recv_handler_init();

	batadv_iv_init();
	batadv_nc_init();

	batadv_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!batadv_event_workqueue)
		return -ENOMEM;

	batadv_socket_init();
	batadv_debugfs_init();

	register_netdevice_notifier(&batadv_hard_if_notifier);
	rtnl_link_register(&batadv_link_ops);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

	return 0;
}

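/**
 * batadv_exit - unregister the batman-adv module
 *
 * Tears down debugfs, the rtnl link operations and the netdevice notifier,
 * removes all remaining hard interfaces and destroys the event workqueue
 * before waiting for pending RCU callbacks to finish.
 */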
static void __exit batadv_exit(void)
{
	batadv_debugfs_destroy();
	rtnl_link_unregister(&batadv_link_ops);
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	rcu_barrier();
}

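/**
 * batadv_mesh_init - initialize the mesh private data of a soft interface
 * @soft_iface: netdev struct of the soft interface
 *
 * Initializes all locks and list heads in bat_priv and brings up the
 * originator, translation table, bridge loop avoidance, DAT, network coding,
 * gateway and multicast components.
 *
 * Returns 0 on success or a negative error code otherwise; on failure the
 * partially initialized mesh is freed again.
 */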
int batadv_mesh_init(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->tt.commit_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
#ifdef CONFIG_BATMAN_ADV_MCAST
	spin_lock_init(&bat_priv->mcast.want_lists_lock);
#endif
	spin_lock_init(&bat_priv->tvlv.container_list_lock);
	spin_lock_init(&bat_priv->tvlv.handler_list_lock);
	spin_lock_init(&bat_priv->softif_vlan_list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw.list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_unsnoopables_list);
#endif
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_LIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
#endif
	INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
	INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
	INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_dat_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_nc_mesh_init(bat_priv);
	if (ret < 0)
		goto err;

	batadv_gw_init(bat_priv);
	batadv_mcast_init(bat_priv);

	atomic_set(&bat_priv->gw.reselect, 0);
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

	return 0;

err:
	batadv_mesh_free(soft_iface);
	return ret;
}

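/**
 * batadv_mesh_free - release all mesh private data of a soft interface
 * @soft_iface: netdev struct of the soft interface
 *
 * Counterpart of batadv_mesh_init(): purges outstanding packets and shuts
 * down the gateway, network coding, DAT, bridge loop avoidance, multicast,
 * translation table and originator components before freeing the per-CPU
 * counters.
 */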
void batadv_mesh_free(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_gw_node_purge(bat_priv);
	batadv_nc_mesh_free(bat_priv);
	batadv_dat_free(bat_priv);
	batadv_bla_free(bat_priv);

	batadv_mcast_free(bat_priv);

	/* Free the TT and the originator tables only after all the other
	 * components that depend on them have been shut down, since they may
	 * still use these structures.
	 */
	batadv_tt_free(bat_priv);

	/* Since the originator table cleanup routine accesses the TT tables
	 * as well, it has to be invoked after the TT tables have been freed
	 * and marked as empty. This ensures that no cleanup RCU callbacks
	 * accessing the TT data are scheduled for later execution.
	 */
	batadv_originator_free(bat_priv);

	batadv_gw_free(bat_priv);

	free_percpu(bat_priv->bat_counters);
	bat_priv->bat_counters = NULL;

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}

/**
 * batadv_is_my_mac - check if the given mac address belongs to any of the real
 * interfaces in the current mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address to check
 */
int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
{
	const struct batadv_hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
			rcu_read_unlock();
			return 1;
		}
	}
	rcu_read_unlock();
	return 0;
}

/**
 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
 * function that requires the primary interface
 * @seq: debugfs table seq_file struct
 *
 * Returns primary interface if found or NULL otherwise.
 */
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (!primary_if) {
		seq_printf(seq,
			   "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
			   net_dev->name);
		goto out;
	}

	if (primary_if->if_status == BATADV_IF_ACTIVE)
		goto out;

	seq_printf(seq,
		   "BATMAN mesh %s disabled - primary interface not active\n",
		   net_dev->name);
	batadv_hardif_free_ref(primary_if);
	primary_if = NULL;

out:
	return primary_if;
}

/**
 * batadv_max_header_len - calculate maximum encapsulation overhead for a
 * payload packet
 *
 * Return the maximum encapsulation overhead in bytes.
 */
int batadv_max_header_len(void)
{
	int header_len = 0;

	header_len = max_t(int, header_len,
			   sizeof(struct batadv_unicast_packet));
	header_len = max_t(int, header_len,
			   sizeof(struct batadv_unicast_4addr_packet));
	header_len = max_t(int, header_len,
			   sizeof(struct batadv_bcast_packet));

#ifdef CONFIG_BATMAN_ADV_NC
	header_len = max_t(int, header_len,
			   sizeof(struct batadv_coded_packet));
#endif

	return header_len + ETH_HLEN;
}

/**
 * batadv_skb_set_priority - sets skb priority according to packet content
 * @skb: the packet to be sent
 * @offset: offset to the packet content
 *
 * This function sets a value between 256 and 263 (802.1d priority), which
 * can be interpreted by the cfg80211 or other drivers.
 */
void batadv_skb_set_priority(struct sk_buff *skb, int offset)
{
	struct iphdr ip_hdr_tmp, *ip_hdr;
	struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
	struct ethhdr ethhdr_tmp, *ethhdr;
	struct vlan_ethhdr *vhdr, vhdr_tmp;
	u32 prio;

	/* already set, do nothing */
	if (skb->priority >= 256 && skb->priority <= 263)
		return;

	ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
	if (!ethhdr)
		return;

	switch (ethhdr->h_proto) {
	case htons(ETH_P_8021Q):
		vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
					  sizeof(*vhdr), &vhdr_tmp);
		if (!vhdr)
			return;
		prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
		prio = prio >> VLAN_PRIO_SHIFT;
		break;
	case htons(ETH_P_IP):
		ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					    sizeof(*ip_hdr), &ip_hdr_tmp);
		if (!ip_hdr)
			return;
		prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
		break;
	case htons(ETH_P_IPV6):
		ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					     sizeof(*ip6_hdr), &ip6_hdr_tmp);
		if (!ip6_hdr)
			return;
		prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
		break;
	default:
		return;
	}

	skb->priority = prio + 256;
}

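/* default receive handler: drop packets of a type no one has registered for */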
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
					struct batadv_hard_iface *recv_if)
{
	return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batadv_priv *bat_priv;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto err_free;

	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

	if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: incompatible batman version (%i)\n",
			   batadv_ogm_packet->version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batadv_ogm_packet->packet_type;
	ret = (*batadv_rx_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}

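/**
 * batadv_recv_handler_init - fill the packet receive handler table
 *
 * Points every slot of batadv_rx_handler at the "unhandled" stubs, verifies
 * the on-wire packet struct sizes at compile time and installs the handlers
 * for the packet types this module processes itself.
 */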
static void batadv_recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_packet;

	for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;

	/* compile time checks for sizes */
	BUILD_BUG_ON(sizeof(struct batadv_bla_claim_dst) != 6);
	BUILD_BUG_ON(sizeof(struct batadv_ogm_packet) != 24);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_header) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet_rr) != 116);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_packet) != 10);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_4addr_packet) != 18);
	BUILD_BUG_ON(sizeof(struct batadv_frag_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_bcast_packet) != 14);
	BUILD_BUG_ON(sizeof(struct batadv_coded_packet) != 46);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_tvlv_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_hdr) != 4);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_gateway_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_vlan_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);

	/* broadcast packet */
	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;

	/* unicast packets ... */
	/* unicast with 4 addresses packet */
	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
	/* unicast packet */
	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
	/* unicast tvlv packet */
	batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
	/* batman icmp packet */
	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
	/* Fragmented packets */
	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
}

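/**
 * batadv_recv_handler_register - register a receive handler for a packet type
 * @packet_type: batman-adv packet type to handle
 * @recv_handler: receive handler for the packet type
 *
 * Returns 0 on success or -EBUSY if a handler other than the default stubs is
 * already registered for this packet type.
 */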
int
batadv_recv_handler_register(uint8_t packet_type,
			     int (*recv_handler)(struct sk_buff *,
						 struct batadv_hard_iface *))
{
	int (*curr)(struct sk_buff *,
		    struct batadv_hard_iface *);
	curr = batadv_rx_handler[packet_type];

	if ((curr != batadv_recv_unhandled_packet) &&
	    (curr != batadv_recv_unhandled_unicast_packet))
		return -EBUSY;

	batadv_rx_handler[packet_type] = recv_handler;
	return 0;
}

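/**
 * batadv_recv_handler_unregister - reset the handler of a packet type to the
 * default stub
 * @packet_type: batman-adv packet type to unregister
 */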
void batadv_recv_handler_unregister(uint8_t packet_type)
{
	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}

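/**
 * batadv_algo_get - look up a routing algorithm by name
 * @name: name of the routing algorithm
 *
 * Returns the matching batadv_algo_ops or NULL if no algorithm with this name
 * has been registered.
 */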
static struct batadv_algo_ops *batadv_algo_get(char *name)
{
	struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;

	hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
		if (strcmp(bat_algo_ops_tmp->name, name) != 0)
			continue;

		bat_algo_ops = bat_algo_ops_tmp;
		break;
	}

	return bat_algo_ops;
}

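/**
 * batadv_algo_register - make a routing algorithm available for selection
 * @bat_algo_ops: the algorithm operations to register
 *
 * Returns 0 on success, -EEXIST if an algorithm with the same name is already
 * registered or -EINVAL if mandatory ops are missing.
 */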
int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
	struct batadv_algo_ops *bat_algo_ops_tmp;
	int ret;

	bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
	if (bat_algo_ops_tmp) {
		pr_info("Trying to register already registered routing algorithm: %s\n",
			bat_algo_ops->name);
		ret = -EEXIST;
		goto out;
	}

	/* all algorithms must implement all ops (for now) */
	if (!bat_algo_ops->bat_iface_enable ||
	    !bat_algo_ops->bat_iface_disable ||
	    !bat_algo_ops->bat_iface_update_mac ||
	    !bat_algo_ops->bat_primary_iface_set ||
	    !bat_algo_ops->bat_ogm_schedule ||
	    !bat_algo_ops->bat_ogm_emit ||
	    !bat_algo_ops->bat_neigh_cmp ||
	    !bat_algo_ops->bat_neigh_is_equiv_or_better) {
		pr_info("Routing algo '%s' does not implement required ops\n",
			bat_algo_ops->name);
		ret = -EINVAL;
		goto out;
	}

	INIT_HLIST_NODE(&bat_algo_ops->list);
	hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
	ret = 0;

out:
	return ret;
}

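/**
 * batadv_algo_select - assign a routing algorithm to a mesh interface
 * @bat_priv: the bat priv with all the soft interface information
 * @name: name of the routing algorithm to use
 *
 * Returns 0 on success or -EINVAL if no algorithm with this name is known.
 */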
int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
{
	struct batadv_algo_ops *bat_algo_ops;
	int ret = -EINVAL;

	bat_algo_ops = batadv_algo_get(name);
	if (!bat_algo_ops)
		goto out;

	bat_priv->bat_algo_ops = bat_algo_ops;
	ret = 0;

out:
	return ret;
}

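/**
 * batadv_algo_seq_print_text - print the registered routing algorithms to a
 * debugfs seq_file
 * @seq: seq file to print on
 * @offset: unused
 */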
int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
	struct batadv_algo_ops *bat_algo_ops;

	seq_puts(seq, "Available routing algorithms:\n");

	hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
		seq_printf(seq, "%s\n", bat_algo_ops->name);
	}

	return 0;
}

/**
 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
 * the header
 * @skb: skb pointing to fragmented socket buffers
 * @payload_ptr: Pointer to position inside the head buffer of the skb
 * marking the start of the data to be CRC'ed
 *
 * payload_ptr must always point to an address in the skb head buffer and not to
 * a fragment.
 */
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
{
	u32 crc = 0;
	unsigned int from;
	unsigned int to = skb->len;
	struct skb_seq_state st;
	const u8 *data;
	unsigned int len;
	unsigned int consumed = 0;

	from = (unsigned int)(payload_ptr - skb->data);

	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		crc = crc32c(crc, data, len);
		consumed += len;
	}

	return htonl(crc);
}

/**
 * batadv_tvlv_handler_free_ref - decrement the tvlv handler refcounter and
 * possibly free it
 * @tvlv_handler: the tvlv handler to free
 */
static void
batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
{
	if (atomic_dec_and_test(&tvlv_handler->refcount))
		kfree_rcu(tvlv_handler, rcu);
}

/**
 * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
 * based on the provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to look for
 * @version: tvlv handler version to look for
 *
 * Returns tvlv handler if found or NULL otherwise.
 */
static struct batadv_tvlv_handler
*batadv_tvlv_handler_get(struct batadv_priv *bat_priv,
			 uint8_t type, uint8_t version)
{
	struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tvlv_handler_tmp,
				 &bat_priv->tvlv.handler_list, list) {
		if (tvlv_handler_tmp->type != type)
			continue;

		if (tvlv_handler_tmp->version != version)
			continue;

		if (!atomic_inc_not_zero(&tvlv_handler_tmp->refcount))
			continue;

		tvlv_handler = tvlv_handler_tmp;
		break;
	}
	rcu_read_unlock();

	return tvlv_handler;
}

/**
 * batadv_tvlv_container_free_ref - decrement the tvlv container refcounter and
 * possibly free it
 * @tvlv: the tvlv container to free
 */
static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
{
	if (atomic_dec_and_test(&tvlv->refcount))
		kfree(tvlv);
}

/**
 * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
 * list based on the provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type to look for
 * @version: tvlv container version to look for
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 *
 * Returns tvlv container if found or NULL otherwise.
 */
static struct batadv_tvlv_container
*batadv_tvlv_container_get(struct batadv_priv *bat_priv,
			   uint8_t type, uint8_t version)
{
	struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;

	hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
		if (tvlv_tmp->tvlv_hdr.type != type)
			continue;

		if (tvlv_tmp->tvlv_hdr.version != version)
			continue;

		if (!atomic_inc_not_zero(&tvlv_tmp->refcount))
			continue;

		tvlv = tvlv_tmp;
		break;
	}

	return tvlv;
}

/**
 * batadv_tvlv_container_list_size - calculate the size of the tvlv container
 * list entries
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 *
 * Returns size of all currently registered tvlv containers in bytes.
 */
static uint16_t batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
{
	struct batadv_tvlv_container *tvlv;
	uint16_t tvlv_len = 0;

	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
		tvlv_len += sizeof(struct batadv_tvlv_hdr);
		tvlv_len += ntohs(tvlv->tvlv_hdr.len);
	}

	return tvlv_len;
}

/**
 * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
 * list
 * @tvlv: the to be removed tvlv container
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 */
static void batadv_tvlv_container_remove(struct batadv_tvlv_container *tvlv)
{
	if (!tvlv)
		return;

	hlist_del(&tvlv->list);

	/* first call to decrement the counter, second call to free */
	batadv_tvlv_container_free_ref(tvlv);
	batadv_tvlv_container_free_ref(tvlv);
}

/**
 * batadv_tvlv_container_unregister - unregister tvlv container based on the
 * provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type to unregister
 * @version: tvlv container version to unregister
 */
void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
				      uint8_t type, uint8_t version)
{
	struct batadv_tvlv_container *tvlv;

	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv = batadv_tvlv_container_get(bat_priv, type, version);
	batadv_tvlv_container_remove(tvlv);
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}

/**
 * batadv_tvlv_container_register - register tvlv type, version and content
 * to be propagated with each (primary interface) OGM
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type
 * @version: tvlv container version
 * @tvlv_value: tvlv container content
 * @tvlv_value_len: tvlv container content length
 *
 * If a container of the same type and version was already registered the new
 * content is going to replace the old one.
 */
void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
				    uint8_t type, uint8_t version,
				    void *tvlv_value, uint16_t tvlv_value_len)
{
	struct batadv_tvlv_container *tvlv_old, *tvlv_new;

	if (!tvlv_value)
		tvlv_value_len = 0;

	tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
	if (!tvlv_new)
		return;

	tvlv_new->tvlv_hdr.version = version;
	tvlv_new->tvlv_hdr.type = type;
	tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);

	memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
	INIT_HLIST_NODE(&tvlv_new->list);
	atomic_set(&tvlv_new->refcount, 1);

	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
	batadv_tvlv_container_remove(tvlv_old);
	hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}

/**
 * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
 * requested packet size
 * @packet_buff: packet buffer
 * @packet_buff_len: packet buffer size
 * @min_packet_len: requested packet minimum size
 * @additional_packet_len: requested additional packet size on top of minimum
 * size
 *
 * Returns true if the packet buffer could be changed to the requested size,
 * false otherwise.
 */
static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
					    int *packet_buff_len,
					    int min_packet_len,
					    int additional_packet_len)
{
	unsigned char *new_buff;

	new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, *packet_buff, min_packet_len);
		kfree(*packet_buff);
		*packet_buff = new_buff;
		*packet_buff_len = min_packet_len + additional_packet_len;
		return true;
	}

	return false;
}

/**
 * batadv_tvlv_container_ogm_append - append tvlv container content to given
 * OGM packet buffer
 * @bat_priv: the bat priv with all the soft interface information
 * @packet_buff: ogm packet buffer
 * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
 * content
 * @packet_min_len: ogm header size to be preserved for the OGM itself
 *
 * The ogm packet might be enlarged or shrunk depending on the current size
 * and the size of the to-be-appended tvlv containers.
 *
 * Returns size of all appended tvlv containers in bytes.
 */
uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
					  unsigned char **packet_buff,
					  int *packet_buff_len,
					  int packet_min_len)
{
	struct batadv_tvlv_container *tvlv;
	struct batadv_tvlv_hdr *tvlv_hdr;
	uint16_t tvlv_value_len;
	void *tvlv_value;
	bool ret;

	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);

	ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
					      packet_min_len, tvlv_value_len);

	if (!ret)
		goto end;

	if (!tvlv_value_len)
		goto end;

	tvlv_value = (*packet_buff) + packet_min_len;

	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
		tvlv_hdr = tvlv_value;
		tvlv_hdr->type = tvlv->tvlv_hdr.type;
		tvlv_hdr->version = tvlv->tvlv_hdr.version;
		tvlv_hdr->len = tvlv->tvlv_hdr.len;
		tvlv_value = tvlv_hdr + 1;
		memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
		tvlv_value = (uint8_t *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
	}

end:
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
	return tvlv_value_len;
}

/**
 * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
 * appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @tvlv_handler: tvlv callback function handling the tvlv content
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Returns success if handler was not found or the return value of the handler
 * callback.
 */
static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
				    struct batadv_tvlv_handler *tvlv_handler,
				    bool ogm_source,
				    struct batadv_orig_node *orig_node,
				    uint8_t *src, uint8_t *dst,
				    void *tvlv_value, uint16_t tvlv_value_len)
{
	if (!tvlv_handler)
		return NET_RX_SUCCESS;

	if (ogm_source) {
		if (!tvlv_handler->ogm_handler)
			return NET_RX_SUCCESS;

		if (!orig_node)
			return NET_RX_SUCCESS;

		tvlv_handler->ogm_handler(bat_priv, orig_node,
					  BATADV_NO_FLAGS,
					  tvlv_value, tvlv_value_len);
		tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
	} else {
		if (!src)
			return NET_RX_SUCCESS;

		if (!dst)
			return NET_RX_SUCCESS;

		if (!tvlv_handler->unicast_handler)
			return NET_RX_SUCCESS;

		return tvlv_handler->unicast_handler(bat_priv, src,
						     dst, tvlv_value,
						     tvlv_value_len);
	}

	return NET_RX_SUCCESS;
}

/**
 * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
 * appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Returns success when processing an OGM or the return value of all called
 * handler callbacks.
 */
int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
				   bool ogm_source,
				   struct batadv_orig_node *orig_node,
				   uint8_t *src, uint8_t *dst,
				   void *tvlv_value, uint16_t tvlv_value_len)
{
	struct batadv_tvlv_handler *tvlv_handler;
	struct batadv_tvlv_hdr *tvlv_hdr;
	uint16_t tvlv_value_cont_len;
	uint8_t cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
	int ret = NET_RX_SUCCESS;

	while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
		tvlv_hdr = tvlv_value;
		tvlv_value_cont_len = ntohs(tvlv_hdr->len);
		tvlv_value = tvlv_hdr + 1;
		tvlv_value_len -= sizeof(*tvlv_hdr);

		if (tvlv_value_cont_len > tvlv_value_len)
			break;

		tvlv_handler = batadv_tvlv_handler_get(bat_priv,
						       tvlv_hdr->type,
						       tvlv_hdr->version);

		ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
						ogm_source, orig_node,
						src, dst, tvlv_value,
						tvlv_value_cont_len);
		if (tvlv_handler)
			batadv_tvlv_handler_free_ref(tvlv_handler);
		tvlv_value = (uint8_t *)tvlv_value + tvlv_value_cont_len;
		tvlv_value_len -= tvlv_value_cont_len;
	}

	if (!ogm_source)
		return ret;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tvlv_handler,
				 &bat_priv->tvlv.handler_list, list) {
		if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
		    !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
			tvlv_handler->ogm_handler(bat_priv, orig_node,
						  cifnotfound, NULL, 0);

		tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
	}
	rcu_read_unlock();

	return NET_RX_SUCCESS;
}

/**
 * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
 * handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @batadv_ogm_packet: ogm packet containing the tvlv containers
 * @orig_node: orig node emitting the ogm packet
 */
void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
			     struct batadv_ogm_packet *batadv_ogm_packet,
			     struct batadv_orig_node *orig_node)
{
	void *tvlv_value;
	uint16_t tvlv_value_len;

	if (!batadv_ogm_packet)
		return;

	tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
	if (!tvlv_value_len)
		return;

	tvlv_value = batadv_ogm_packet + 1;

	batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
				       tvlv_value, tvlv_value_len);
}

/**
 * batadv_tvlv_handler_register - register tvlv handler based on the provided
 * type and version (both need to match) for ogm tvlv payload and/or unicast
 * payload
 * @bat_priv: the bat priv with all the soft interface information
 * @optr: ogm tvlv handler callback function. This function receives the orig
 * node, flags and the tvlv content as argument to process.
 * @uptr: unicast tvlv handler callback function. This function receives the
 * source & destination of the unicast packet as well as the tvlv content
 * to process.
 * @type: tvlv handler type to be registered
 * @version: tvlv handler version to be registered
 * @flags: flags to enable or disable TVLV API behavior
 */
void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
				  void (*optr)(struct batadv_priv *bat_priv,
					       struct batadv_orig_node *orig,
					       uint8_t flags,
					       void *tvlv_value,
					       uint16_t tvlv_value_len),
				  int (*uptr)(struct batadv_priv *bat_priv,
					      uint8_t *src, uint8_t *dst,
					      void *tvlv_value,
					      uint16_t tvlv_value_len),
				  uint8_t type, uint8_t version, uint8_t flags)
{
	struct batadv_tvlv_handler *tvlv_handler;

	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
	if (tvlv_handler) {
		batadv_tvlv_handler_free_ref(tvlv_handler);
		return;
	}

	tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
	if (!tvlv_handler)
		return;

	tvlv_handler->ogm_handler = optr;
	tvlv_handler->unicast_handler = uptr;
	tvlv_handler->type = type;
	tvlv_handler->version = version;
	tvlv_handler->flags = flags;
	atomic_set(&tvlv_handler->refcount, 1);
	INIT_HLIST_NODE(&tvlv_handler->list);

	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
	hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
}

/**
 * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
 * provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to be unregistered
 * @version: tvlv handler version to be unregistered
 */
void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
				    uint8_t type, uint8_t version)
{
	struct batadv_tvlv_handler *tvlv_handler;

	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
	if (!tvlv_handler)
		return;

	batadv_tvlv_handler_free_ref(tvlv_handler);
	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
	hlist_del_rcu(&tvlv_handler->list);
	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
	batadv_tvlv_handler_free_ref(tvlv_handler);
}

/**
 * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
 * specified host
 * @bat_priv: the bat priv with all the soft interface information
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @type: tvlv type
 * @version: tvlv version
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 */
void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
			      uint8_t *dst, uint8_t type, uint8_t version,
			      void *tvlv_value, uint16_t tvlv_value_len)
{
	struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
	struct batadv_tvlv_hdr *tvlv_hdr;
	struct batadv_orig_node *orig_node;
	struct sk_buff *skb = NULL;
	unsigned char *tvlv_buff;
	unsigned int tvlv_len;
	ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
	bool ret = false;

	orig_node = batadv_orig_hash_find(bat_priv, dst);
	if (!orig_node)
		goto out;

	tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;

	skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
	if (!skb)
		goto out;

	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, ETH_HLEN);
	tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
	unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
	unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
	unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
	unicast_tvlv_packet->ttl = BATADV_TTL;
	unicast_tvlv_packet->reserved = 0;
	unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
	unicast_tvlv_packet->align = 0;
	ether_addr_copy(unicast_tvlv_packet->src, src);
	ether_addr_copy(unicast_tvlv_packet->dst, dst);

	tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
	tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
	tvlv_hdr->version = version;
	tvlv_hdr->type = type;
	tvlv_hdr->len = htons(tvlv_value_len);
	tvlv_buff += sizeof(*tvlv_hdr);
	memcpy(tvlv_buff, tvlv_value, tvlv_value_len);

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = true;

out:
	if (skb && !ret)
		kfree_skb(skb);
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
}

/**
 * batadv_get_vid - extract the VLAN identifier from skb if any
 * @skb: the buffer containing the packet
 * @header_len: length of the batman header preceding the ethernet header
 *
 * If the packet embedded in the skb is vlan tagged this function returns the
 * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned.
 */
unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
{
	struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
	struct vlan_ethhdr *vhdr;
	unsigned short vid;

	if (ethhdr->h_proto != htons(ETH_P_8021Q))
		return BATADV_NO_FLAGS;

	if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
		return BATADV_NO_FLAGS;

	vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
	vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
	vid |= BATADV_VLAN_HAS_TAG;

	return vid;
}

/**
 * batadv_vlan_ap_isola_get - return the AP isolation status for the given vlan
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the VLAN identifier for which the AP isolation attribute is to be
 * looked up
 *
 * Returns true if AP isolation is on for the VLAN identified by vid, false
 * otherwise
 */
bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
{
	bool ap_isolation_enabled = false;
	struct batadv_softif_vlan *vlan;

	/* if the AP isolation is requested on a VLAN, then check for its
	 * setting in the proper VLAN private data structure
	 */
	vlan = batadv_softif_vlan_get(bat_priv, vid);
	if (vlan) {
		ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
		batadv_softif_vlan_free_ref(vlan);
	}

	return ap_isolation_enabled;
}

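/* module parameter callback: validate and store the routing_algo parameter */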
static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
	struct batadv_algo_ops *bat_algo_ops;
	char *algo_name = (char *)val;
	size_t name_len = strlen(algo_name);

	if (name_len > 0 && algo_name[name_len - 1] == '\n')
		algo_name[name_len - 1] = '\0';

	bat_algo_ops = batadv_algo_get(algo_name);
	if (!bat_algo_ops) {
		pr_err("Routing algorithm '%s' is not supported\n", algo_name);
		return -EINVAL;
	}

	return param_set_copystring(algo_name, kp);
}

static const struct kernel_param_ops batadv_param_ops_ra = {
	.set = batadv_param_set_ra,
	.get = param_get_string,
};

static struct kparam_string batadv_param_string_ra = {
	.maxlen = sizeof(batadv_routing_algo),
	.string = batadv_routing_algo,
};

module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
		0644);
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);