blob: d1183e882167c3cd75aa842c94c612818e250d30 [file] [log] [blame]
Simon Wunderliche19f9752014-01-04 18:04:25 +01001/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00002 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
Antonio Quartulliebf38fb2013-11-03 20:40:48 +010015 * along with this program; if not, see <http://www.gnu.org/licenses/>.
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000016 */
17
Sven Eckelmann95a066d2012-10-17 21:10:39 +020018#include <linux/crc32c.h>
19#include <linux/highmem.h>
Simon Wunderlichc54f38c2013-07-29 17:56:44 +020020#include <linux/if_vlan.h>
21#include <net/ip.h>
22#include <net/ipv6.h>
23#include <net/dsfield.h>
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000024#include "main.h"
Sven Eckelmannb706b132012-06-10 23:58:51 +020025#include "sysfs.h"
26#include "debugfs.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000027#include "routing.h"
28#include "send.h"
29#include "originator.h"
30#include "soft-interface.h"
31#include "icmp_socket.h"
32#include "translation-table.h"
33#include "hard-interface.h"
34#include "gateway_client.h"
Simon Wunderlich23721382012-01-22 20:00:19 +010035#include "bridge_loop_avoidance.h"
Antonio Quartulli2f1dfbe2012-06-30 20:01:19 +020036#include "distributed-arp-table.h"
Linus Lüssingc5caf4e2014-02-15 17:47:49 +010037#include "multicast.h"
Marek Lindner414254e2013-04-23 21:39:58 +080038#include "gateway_common.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000039#include "hash.h"
Marek Lindner1c280472011-11-28 17:40:17 +080040#include "bat_algo.h"
Martin Hundebølld353d8d2013-01-25 11:12:38 +010041#include "network-coding.h"
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +020042#include "fragmentation.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000043
Sven Eckelmannc3caf512011-05-03 11:51:38 +020044
/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;

/* one receive handler slot per possible packet-type byte; unclaimed slots
 * point at a drop stub installed by batadv_recv_handler_init()
 */
static int (*batadv_rx_handler[256])(struct sk_buff *,
				     struct batadv_hard_iface *);

/* name of the routing algorithm selected by default (module parameter) */
char batadv_routing_algo[20] = "BATMAN_IV";

/* list of all registered batadv_algo_ops; protected by module load order
 * (registration happens from module init paths only)
 */
static struct hlist_head batadv_algo_list;

unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/* single-threaded workqueue used for all deferred batman-adv work */
struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);
/**
 * batadv_init - module entry point
 *
 * Sets up global lists, the receive-handler table, the default routing
 * algorithm and the event workqueue, then registers the netdevice notifier,
 * the rtnl link ops and the debugfs/socket interfaces.
 *
 * Returns 0 on success, -ENOMEM if the event workqueue cannot be created.
 */
static int __init batadv_init(void)
{
	INIT_LIST_HEAD(&batadv_hardif_list);
	INIT_HLIST_HEAD(&batadv_algo_list);

	/* install drop stubs before any handler registration can happen */
	batadv_recv_handler_init();

	batadv_iv_init();
	batadv_nc_init();

	batadv_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!batadv_event_workqueue)
		return -ENOMEM;

	batadv_socket_init();
	batadv_debugfs_init();

	register_netdevice_notifier(&batadv_hard_if_notifier);
	rtnl_link_register(&batadv_link_ops);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

	return 0;
}
86
/**
 * batadv_exit - module exit point
 *
 * Tears down in reverse order of batadv_init(): debugfs, rtnl link ops,
 * netdevice notifier, remaining hard interfaces, then drains and destroys
 * the event workqueue. rcu_barrier() waits for in-flight RCU callbacks
 * before the module text is unloaded.
 */
static void __exit batadv_exit(void)
{
	batadv_debugfs_destroy();
	rtnl_link_unregister(&batadv_link_ops);
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	rcu_barrier();
}
100
/**
 * batadv_mesh_init - initialize the mesh instance of a soft interface
 * @soft_iface: netdev struct of the soft interface
 *
 * Initializes all locks and list heads in the per-mesh private data, then
 * brings up the depending subsystems (originator table, TT, BLA, DAT,
 * network coding, gateway, multicast) in order. On any failure the already
 * initialized parts are torn down again via batadv_mesh_free().
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int batadv_mesh_init(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	/* all spinlocks must be ready before any subsystem below runs */
	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->tt.commit_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
#ifdef CONFIG_BATMAN_ADV_MCAST
	spin_lock_init(&bat_priv->mcast.want_lists_lock);
#endif
	spin_lock_init(&bat_priv->tvlv.container_list_lock);
	spin_lock_init(&bat_priv->tvlv.handler_list_lock);
	spin_lock_init(&bat_priv->softif_vlan_list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw.list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_unsnoopables_list);
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv4_list);
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
#endif
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_LIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
#endif
	INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
	INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
	INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_dat_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_nc_mesh_init(bat_priv);
	if (ret < 0)
		goto err;

	batadv_gw_init(bat_priv);
	batadv_mcast_init(bat_priv);

	atomic_set(&bat_priv->gw.reselect, 0);
	/* only flip to ACTIVE once everything above succeeded */
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

	return 0;

err:
	/* batadv_mesh_free() tolerates partially initialized state */
	batadv_mesh_free(soft_iface);
	return ret;
}
171
/**
 * batadv_mesh_free - tear down the mesh instance of a soft interface
 * @soft_iface: netdev struct of the soft interface
 *
 * Counterpart of batadv_mesh_init(). The teardown order below is
 * deliberate: components that reference the TT/originator tables must be
 * freed first. Marks the mesh DEACTIVATING on entry and INACTIVE when done.
 */
void batadv_mesh_free(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_gw_node_purge(bat_priv);
	batadv_nc_mesh_free(bat_priv);
	batadv_dat_free(bat_priv);
	batadv_bla_free(bat_priv);

	batadv_mcast_free(bat_priv);

	/* Free the TT and the originator tables only after having terminated
	 * all the other depending components which may use these structures for
	 * their purposes.
	 */
	batadv_tt_free(bat_priv);

	/* Since the originator table clean up routine is accessing the TT
	 * tables as well, it has to be invoked after the TT tables have been
	 * freed and marked as empty. This ensures that no cleanup RCU callbacks
	 * accessing the TT data are scheduled for later execution.
	 */
	batadv_originator_free(bat_priv);

	batadv_gw_free(bat_priv);

	free_percpu(bat_priv->bat_counters);
	bat_priv->bat_counters = NULL;

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}
207
David S. Miller6e0895c2013-04-22 20:32:51 -0400208/**
209 * batadv_is_my_mac - check if the given mac address belongs to any of the real
210 * interfaces in the current mesh
211 * @bat_priv: the bat priv with all the soft interface information
212 * @addr: the address to check
213 */
Antonio Quartullife8a93b2013-04-03 19:10:26 +0200214int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000215{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200216 const struct batadv_hard_iface *hard_iface;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000217
218 rcu_read_lock();
Sven Eckelmann3193e8f2012-05-12 02:09:42 +0200219 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
Sven Eckelmanne9a4f292012-06-03 22:19:19 +0200220 if (hard_iface->if_status != BATADV_IF_ACTIVE)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000221 continue;
222
Antonio Quartullife8a93b2013-04-03 19:10:26 +0200223 if (hard_iface->soft_iface != bat_priv->soft_iface)
224 continue;
225
Sven Eckelmann1eda58b2012-05-12 13:48:58 +0200226 if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000227 rcu_read_unlock();
228 return 1;
229 }
230 }
231 rcu_read_unlock();
232 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000233}
234
Marek Lindner30da63a2012-08-03 17:15:46 +0200235/**
236 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
237 * function that requires the primary interface
238 * @seq: debugfs table seq_file struct
239 *
240 * Returns primary interface if found or NULL otherwise.
241 */
242struct batadv_hard_iface *
243batadv_seq_print_text_primary_if_get(struct seq_file *seq)
244{
245 struct net_device *net_dev = (struct net_device *)seq->private;
246 struct batadv_priv *bat_priv = netdev_priv(net_dev);
247 struct batadv_hard_iface *primary_if;
248
249 primary_if = batadv_primary_if_get_selected(bat_priv);
250
251 if (!primary_if) {
252 seq_printf(seq,
253 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
254 net_dev->name);
255 goto out;
256 }
257
258 if (primary_if->if_status == BATADV_IF_ACTIVE)
259 goto out;
260
261 seq_printf(seq,
262 "BATMAN mesh %s disabled - primary interface not active\n",
263 net_dev->name);
264 batadv_hardif_free_ref(primary_if);
265 primary_if = NULL;
266
267out:
268 return primary_if;
269}
270
Simon Wunderlichc54f38c2013-07-29 17:56:44 +0200271/**
Marek Lindner411d6ed2013-05-08 13:31:59 +0800272 * batadv_max_header_len - calculate maximum encapsulation overhead for a
273 * payload packet
274 *
275 * Return the maximum encapsulation overhead in bytes.
276 */
277int batadv_max_header_len(void)
278{
279 int header_len = 0;
280
281 header_len = max_t(int, header_len,
282 sizeof(struct batadv_unicast_packet));
283 header_len = max_t(int, header_len,
284 sizeof(struct batadv_unicast_4addr_packet));
285 header_len = max_t(int, header_len,
286 sizeof(struct batadv_bcast_packet));
287
288#ifdef CONFIG_BATMAN_ADV_NC
289 header_len = max_t(int, header_len,
290 sizeof(struct batadv_coded_packet));
291#endif
292
Marek Lindner1df0cbd2014-01-15 20:31:18 +0800293 return header_len + ETH_HLEN;
Marek Lindner411d6ed2013-05-08 13:31:59 +0800294}
295
/**
 * batadv_skb_set_priority - sets skb priority according to packet content
 * @skb: the packet to be sent
 * @offset: offset to the packet content
 *
 * This function sets a value between 256 and 263 (802.1d priority), which
 * can be interpreted by the cfg80211 or other drivers.
 */
void batadv_skb_set_priority(struct sk_buff *skb, int offset)
{
	struct iphdr ip_hdr_tmp, *ip_hdr;
	struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
	struct ethhdr ethhdr_tmp, *ethhdr;
	struct vlan_ethhdr *vhdr, vhdr_tmp;
	u32 prio;

	/* already set, do nothing */
	if (skb->priority >= 256 && skb->priority <= 263)
		return;

	ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
	if (!ethhdr)
		return;

	switch (ethhdr->h_proto) {
	case htons(ETH_P_8021Q):
		/* NOTE(review): the VLAN header is read at
		 * offset + sizeof(*vhdr) although the vlan_ethhdr itself
		 * starts at @offset - looks like this should be @offset
		 * (or offset + sizeof(*ethhdr) for the tag portion);
		 * verify against upstream before changing.
		 */
		vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
					  sizeof(*vhdr), &vhdr_tmp);
		if (!vhdr)
			return;
		prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
		prio = prio >> VLAN_PRIO_SHIFT;
		break;
	case htons(ETH_P_IP):
		ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					    sizeof(*ip_hdr), &ip_hdr_tmp);
		if (!ip_hdr)
			return;
		/* map DSCP (upper 6 bits of the DS field) to a 0-7 priority */
		prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
		break;
	case htons(ETH_P_IPV6):
		ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					     sizeof(*ip6_hdr), &ip6_hdr_tmp);
		if (!ip6_hdr)
			return;
		prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
		break;
	default:
		return;
	}

	/* shift into the 256..263 window understood by cfg80211 */
	skb->priority = prio + 256;
}
349
/* default rx handler for packet types nobody registered for: drop */
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
					struct batadv_hard_iface *recv_if)
{
	return NET_RX_DROP;
}
355
/* incoming packets with the batman ethertype received on any active hard
 * interface
 *
 * Validates the frame (shared skb, minimum length, sane mac header, active
 * mesh/interface, compatible protocol version) and dispatches it to the
 * handler registered for its packet type. Always reports NET_RX_SUCCESS to
 * the stack once the frame reached dispatching; earlier validation failures
 * free the skb and return NET_RX_DROP.
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batadv_priv *bat_priv;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto err_free;

	/* any batman packet starts with the common header layout, so the
	 * version/type fields can be read through the OGM struct
	 */
	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

	if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: incompatible batman version (%i)\n",
			   batadv_ogm_packet->version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batadv_ogm_packet->packet_type;
	ret = (*batadv_rx_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}
426
/* initialize the receive-handler dispatch table: every slot gets a drop
 * stub, the unicast range gets a dedicated unhandled-unicast stub, and the
 * packet types implemented in this module get their real handlers. Also
 * hosts the compile-time checks pinning the on-wire struct sizes.
 */
static void batadv_recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_packet;

	for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;

	/* compile time checks for sizes */
	BUILD_BUG_ON(sizeof(struct batadv_bla_claim_dst) != 6);
	BUILD_BUG_ON(sizeof(struct batadv_ogm_packet) != 24);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_header) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet_rr) != 116);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_packet) != 10);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_4addr_packet) != 18);
	BUILD_BUG_ON(sizeof(struct batadv_frag_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_bcast_packet) != 14);
	BUILD_BUG_ON(sizeof(struct batadv_coded_packet) != 46);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_tvlv_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_hdr) != 4);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_gateway_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_vlan_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);

	/* broadcast packet */
	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;

	/* unicast packets ... */
	/* unicast with 4 addresses packet */
	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
	/* unicast packet */
	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
	/* unicast tvlv packet */
	batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
	/* batman icmp packet */
	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
	/* Fragmented packets */
	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
}
470
Sven Eckelmann56303d32012-06-05 22:31:31 +0200471int
472batadv_recv_handler_register(uint8_t packet_type,
473 int (*recv_handler)(struct sk_buff *,
474 struct batadv_hard_iface *))
Marek Lindnerffa995e2012-03-01 15:35:17 +0800475{
Simon Wunderlicha1f1ac52013-04-25 10:37:23 +0200476 int (*curr)(struct sk_buff *,
477 struct batadv_hard_iface *);
478 curr = batadv_rx_handler[packet_type];
479
480 if ((curr != batadv_recv_unhandled_packet) &&
481 (curr != batadv_recv_unhandled_unicast_packet))
Marek Lindnerffa995e2012-03-01 15:35:17 +0800482 return -EBUSY;
483
Sven Eckelmannee11ad62012-05-16 20:23:19 +0200484 batadv_rx_handler[packet_type] = recv_handler;
Marek Lindnerffa995e2012-03-01 15:35:17 +0800485 return 0;
486}
487
/* release a packet-type slot by restoring the default drop stub */
void batadv_recv_handler_unregister(uint8_t packet_type)
{
	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}
492
Sven Eckelmann56303d32012-06-05 22:31:31 +0200493static struct batadv_algo_ops *batadv_algo_get(char *name)
Marek Lindner1c280472011-11-28 17:40:17 +0800494{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200495 struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
Marek Lindner1c280472011-11-28 17:40:17 +0800496
Sasha Levinb67bfe02013-02-27 17:06:00 -0800497 hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
Marek Lindner1c280472011-11-28 17:40:17 +0800498 if (strcmp(bat_algo_ops_tmp->name, name) != 0)
499 continue;
500
501 bat_algo_ops = bat_algo_ops_tmp;
502 break;
503 }
504
505 return bat_algo_ops;
506}
507
/**
 * batadv_algo_register - register a routing algorithm with batman-adv
 * @bat_algo_ops: the algorithm's operations table
 *
 * Rejects duplicate names (-EEXIST) and incomplete ops tables (-EINVAL);
 * otherwise adds the algorithm to the global list and returns 0.
 */
int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
	struct batadv_algo_ops *bat_algo_ops_tmp;
	int ret;

	bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
	if (bat_algo_ops_tmp) {
		pr_info("Trying to register already registered routing algorithm: %s\n",
			bat_algo_ops->name);
		ret = -EEXIST;
		goto out;
	}

	/* all algorithms must implement all ops (for now) */
	if (!bat_algo_ops->bat_iface_enable ||
	    !bat_algo_ops->bat_iface_disable ||
	    !bat_algo_ops->bat_iface_update_mac ||
	    !bat_algo_ops->bat_primary_iface_set ||
	    !bat_algo_ops->bat_ogm_schedule ||
	    !bat_algo_ops->bat_ogm_emit ||
	    !bat_algo_ops->bat_neigh_cmp ||
	    !bat_algo_ops->bat_neigh_is_equiv_or_better) {
		pr_info("Routing algo '%s' does not implement required ops\n",
			bat_algo_ops->name);
		ret = -EINVAL;
		goto out;
	}

	INIT_HLIST_NODE(&bat_algo_ops->list);
	hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
	ret = 0;

out:
	return ret;
}
543
Sven Eckelmann56303d32012-06-05 22:31:31 +0200544int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
Marek Lindner1c280472011-11-28 17:40:17 +0800545{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200546 struct batadv_algo_ops *bat_algo_ops;
Sven Eckelmann5346c352012-05-05 13:27:28 +0200547 int ret = -EINVAL;
Marek Lindner1c280472011-11-28 17:40:17 +0800548
Sven Eckelmannee11ad62012-05-16 20:23:19 +0200549 bat_algo_ops = batadv_algo_get(name);
Marek Lindner1c280472011-11-28 17:40:17 +0800550 if (!bat_algo_ops)
551 goto out;
552
553 bat_priv->bat_algo_ops = bat_algo_ops;
554 ret = 0;
555
556out:
557 return ret;
558}
559
Sven Eckelmann3193e8f2012-05-12 02:09:42 +0200560int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
Marek Lindner1c280472011-11-28 17:40:17 +0800561{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200562 struct batadv_algo_ops *bat_algo_ops;
Marek Lindner1c280472011-11-28 17:40:17 +0800563
Antonio Quartulli0c814652013-03-21 09:23:29 +0100564 seq_puts(seq, "Available routing algorithms:\n");
Marek Lindner1c280472011-11-28 17:40:17 +0800565
Sasha Levinb67bfe02013-02-27 17:06:00 -0800566 hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
Marek Lindner1c280472011-11-28 17:40:17 +0800567 seq_printf(seq, "%s\n", bat_algo_ops->name);
568 }
569
570 return 0;
571}
572
/**
 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
 * the header
 * @skb: skb pointing to fragmented socket buffers
 * @payload_ptr: Pointer to position inside the head buffer of the skb
 * marking the start of the data to be CRC'ed
 *
 * payload_ptr must always point to an address in the skb head buffer and not to
 * a fragment.
 *
 * Returns the CRC32C of the payload in network byte order.
 */
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
{
	u32 crc = 0;
	unsigned int from;
	unsigned int to = skb->len;
	struct skb_seq_state st;
	const u8 *data;
	unsigned int len;
	unsigned int consumed = 0;

	from = (unsigned int)(payload_ptr - skb->data);

	/* walk the (possibly fragmented) skb chunk by chunk; skb_seq_read()
	 * returns 0 once the [from, to) window is exhausted and ends the
	 * sequential read state itself at that point
	 */
	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		crc = crc32c(crc, data, len);
		consumed += len;
	}

	return htonl(crc);
}
603
/**
 * batadv_tvlv_handler_free_ref - decrement the tvlv handler refcounter and
 * possibly free it
 * @tvlv_handler: the tvlv handler to free
 */
static void
batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
{
	/* freed via RCU so concurrent lockless readers stay safe */
	if (atomic_dec_and_test(&tvlv_handler->refcount))
		kfree_rcu(tvlv_handler, rcu);
}
615
616/**
617 * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
618 * based on the provided type and version (both need to match)
619 * @bat_priv: the bat priv with all the soft interface information
620 * @type: tvlv handler type to look for
621 * @version: tvlv handler version to look for
622 *
623 * Returns tvlv handler if found or NULL otherwise.
624 */
625static struct batadv_tvlv_handler
626*batadv_tvlv_handler_get(struct batadv_priv *bat_priv,
627 uint8_t type, uint8_t version)
628{
629 struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
630
631 rcu_read_lock();
632 hlist_for_each_entry_rcu(tvlv_handler_tmp,
633 &bat_priv->tvlv.handler_list, list) {
634 if (tvlv_handler_tmp->type != type)
635 continue;
636
637 if (tvlv_handler_tmp->version != version)
638 continue;
639
640 if (!atomic_inc_not_zero(&tvlv_handler_tmp->refcount))
641 continue;
642
643 tvlv_handler = tvlv_handler_tmp;
644 break;
645 }
646 rcu_read_unlock();
647
648 return tvlv_handler;
649}
650
/**
 * batadv_tvlv_container_free_ref - decrement the tvlv container refcounter and
 * possibly free it
 * @tvlv: the tvlv container to free
 */
static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
{
	if (atomic_dec_and_test(&tvlv->refcount))
		kfree(tvlv);
}
661
662/**
663 * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
664 * list based on the provided type and version (both need to match)
665 * @bat_priv: the bat priv with all the soft interface information
666 * @type: tvlv container type to look for
667 * @version: tvlv container version to look for
668 *
669 * Has to be called with the appropriate locks being acquired
670 * (tvlv.container_list_lock).
671 *
672 * Returns tvlv container if found or NULL otherwise.
673 */
674static struct batadv_tvlv_container
675*batadv_tvlv_container_get(struct batadv_priv *bat_priv,
676 uint8_t type, uint8_t version)
677{
678 struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
679
680 hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
681 if (tvlv_tmp->tvlv_hdr.type != type)
682 continue;
683
684 if (tvlv_tmp->tvlv_hdr.version != version)
685 continue;
686
687 if (!atomic_inc_not_zero(&tvlv_tmp->refcount))
688 continue;
689
690 tvlv = tvlv_tmp;
691 break;
692 }
693
694 return tvlv;
695}
696
697/**
698 * batadv_tvlv_container_list_size - calculate the size of the tvlv container
699 * list entries
700 * @bat_priv: the bat priv with all the soft interface information
701 *
702 * Has to be called with the appropriate locks being acquired
703 * (tvlv.container_list_lock).
704 *
705 * Returns size of all currently registered tvlv containers in bytes.
706 */
707static uint16_t batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
708{
709 struct batadv_tvlv_container *tvlv;
710 uint16_t tvlv_len = 0;
711
712 hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
713 tvlv_len += sizeof(struct batadv_tvlv_hdr);
714 tvlv_len += ntohs(tvlv->tvlv_hdr.len);
715 }
716
717 return tvlv_len;
718}
719
720/**
721 * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
722 * list
723 * @tvlv: the to be removed tvlv container
724 *
725 * Has to be called with the appropriate locks being acquired
726 * (tvlv.container_list_lock).
727 */
728static void batadv_tvlv_container_remove(struct batadv_tvlv_container *tvlv)
729{
730 if (!tvlv)
731 return;
732
733 hlist_del(&tvlv->list);
734
735 /* first call to decrement the counter, second call to free */
736 batadv_tvlv_container_free_ref(tvlv);
737 batadv_tvlv_container_free_ref(tvlv);
738}
739
740/**
741 * batadv_tvlv_container_unregister - unregister tvlv container based on the
742 * provided type and version (both need to match)
743 * @bat_priv: the bat priv with all the soft interface information
744 * @type: tvlv container type to unregister
745 * @version: tvlv container type to unregister
746 */
747void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
748 uint8_t type, uint8_t version)
749{
750 struct batadv_tvlv_container *tvlv;
751
752 spin_lock_bh(&bat_priv->tvlv.container_list_lock);
753 tvlv = batadv_tvlv_container_get(bat_priv, type, version);
754 batadv_tvlv_container_remove(tvlv);
755 spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
756}
757
758/**
759 * batadv_tvlv_container_register - register tvlv type, version and content
760 * to be propagated with each (primary interface) OGM
761 * @bat_priv: the bat priv with all the soft interface information
762 * @type: tvlv container type
763 * @version: tvlv container version
764 * @tvlv_value: tvlv container content
765 * @tvlv_value_len: tvlv container content length
766 *
767 * If a container of the same type and version was already registered the new
768 * content is going to replace the old one.
769 */
770void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
771 uint8_t type, uint8_t version,
772 void *tvlv_value, uint16_t tvlv_value_len)
773{
774 struct batadv_tvlv_container *tvlv_old, *tvlv_new;
775
776 if (!tvlv_value)
777 tvlv_value_len = 0;
778
779 tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
780 if (!tvlv_new)
781 return;
782
783 tvlv_new->tvlv_hdr.version = version;
784 tvlv_new->tvlv_hdr.type = type;
785 tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);
786
787 memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
788 INIT_HLIST_NODE(&tvlv_new->list);
789 atomic_set(&tvlv_new->refcount, 1);
790
791 spin_lock_bh(&bat_priv->tvlv.container_list_lock);
792 tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
793 batadv_tvlv_container_remove(tvlv_old);
794 hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
795 spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
796}
797
798/**
799 * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accomodate
800 * requested packet size
801 * @packet_buff: packet buffer
802 * @packet_buff_len: packet buffer size
803 * @packet_min_len: requested packet minimum size
804 * @additional_packet_len: requested additional packet size on top of minimum
805 * size
806 *
807 * Returns true of the packet buffer could be changed to the requested size,
808 * false otherwise.
809 */
810static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
811 int *packet_buff_len,
812 int min_packet_len,
813 int additional_packet_len)
814{
815 unsigned char *new_buff;
816
817 new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);
818
819 /* keep old buffer if kmalloc should fail */
820 if (new_buff) {
821 memcpy(new_buff, *packet_buff, min_packet_len);
822 kfree(*packet_buff);
823 *packet_buff = new_buff;
824 *packet_buff_len = min_packet_len + additional_packet_len;
825 return true;
826 }
827
828 return false;
829}
830
831/**
832 * batadv_tvlv_container_ogm_append - append tvlv container content to given
833 * OGM packet buffer
834 * @bat_priv: the bat priv with all the soft interface information
835 * @packet_buff: ogm packet buffer
836 * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
837 * content
838 * @packet_min_len: ogm header size to be preserved for the OGM itself
839 *
840 * The ogm packet might be enlarged or shrunk depending on the current size
841 * and the size of the to-be-appended tvlv containers.
842 *
843 * Returns size of all appended tvlv containers in bytes.
844 */
845uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
846 unsigned char **packet_buff,
847 int *packet_buff_len,
848 int packet_min_len)
849{
850 struct batadv_tvlv_container *tvlv;
851 struct batadv_tvlv_hdr *tvlv_hdr;
852 uint16_t tvlv_value_len;
853 void *tvlv_value;
854 bool ret;
855
856 spin_lock_bh(&bat_priv->tvlv.container_list_lock);
857 tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);
858
859 ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
860 packet_min_len, tvlv_value_len);
861
862 if (!ret)
863 goto end;
864
865 if (!tvlv_value_len)
866 goto end;
867
868 tvlv_value = (*packet_buff) + packet_min_len;
869
870 hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
871 tvlv_hdr = tvlv_value;
872 tvlv_hdr->type = tvlv->tvlv_hdr.type;
873 tvlv_hdr->version = tvlv->tvlv_hdr.version;
874 tvlv_hdr->len = tvlv->tvlv_hdr.len;
875 tvlv_value = tvlv_hdr + 1;
876 memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
877 tvlv_value = (uint8_t *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
878 }
879
880end:
881 spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
882 return tvlv_value_len;
883}
884
885/**
886 * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
887 * appropriate handlers
888 * @bat_priv: the bat priv with all the soft interface information
889 * @tvlv_handler: tvlv callback function handling the tvlv content
890 * @ogm_source: flag indicating wether the tvlv is an ogm or a unicast packet
891 * @orig_node: orig node emitting the ogm packet
892 * @src: source mac address of the unicast packet
893 * @dst: destination mac address of the unicast packet
894 * @tvlv_value: tvlv content
895 * @tvlv_value_len: tvlv content length
896 *
897 * Returns success if handler was not found or the return value of the handler
898 * callback.
899 */
900static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
901 struct batadv_tvlv_handler *tvlv_handler,
902 bool ogm_source,
903 struct batadv_orig_node *orig_node,
904 uint8_t *src, uint8_t *dst,
905 void *tvlv_value, uint16_t tvlv_value_len)
906{
907 if (!tvlv_handler)
908 return NET_RX_SUCCESS;
909
910 if (ogm_source) {
911 if (!tvlv_handler->ogm_handler)
912 return NET_RX_SUCCESS;
913
914 if (!orig_node)
915 return NET_RX_SUCCESS;
916
917 tvlv_handler->ogm_handler(bat_priv, orig_node,
918 BATADV_NO_FLAGS,
919 tvlv_value, tvlv_value_len);
920 tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
921 } else {
922 if (!src)
923 return NET_RX_SUCCESS;
924
925 if (!dst)
926 return NET_RX_SUCCESS;
927
928 if (!tvlv_handler->unicast_handler)
929 return NET_RX_SUCCESS;
930
931 return tvlv_handler->unicast_handler(bat_priv, src,
932 dst, tvlv_value,
933 tvlv_value_len);
934 }
935
936 return NET_RX_SUCCESS;
937}
938
939/**
940 * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
941 * appropriate handlers
942 * @bat_priv: the bat priv with all the soft interface information
943 * @ogm_source: flag indicating wether the tvlv is an ogm or a unicast packet
944 * @orig_node: orig node emitting the ogm packet
945 * @src: source mac address of the unicast packet
946 * @dst: destination mac address of the unicast packet
947 * @tvlv_value: tvlv content
948 * @tvlv_value_len: tvlv content length
949 *
950 * Returns success when processing an OGM or the return value of all called
951 * handler callbacks.
952 */
953int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
954 bool ogm_source,
955 struct batadv_orig_node *orig_node,
956 uint8_t *src, uint8_t *dst,
957 void *tvlv_value, uint16_t tvlv_value_len)
958{
959 struct batadv_tvlv_handler *tvlv_handler;
960 struct batadv_tvlv_hdr *tvlv_hdr;
961 uint16_t tvlv_value_cont_len;
962 uint8_t cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
963 int ret = NET_RX_SUCCESS;
964
965 while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
966 tvlv_hdr = tvlv_value;
967 tvlv_value_cont_len = ntohs(tvlv_hdr->len);
968 tvlv_value = tvlv_hdr + 1;
969 tvlv_value_len -= sizeof(*tvlv_hdr);
970
971 if (tvlv_value_cont_len > tvlv_value_len)
972 break;
973
974 tvlv_handler = batadv_tvlv_handler_get(bat_priv,
975 tvlv_hdr->type,
976 tvlv_hdr->version);
977
978 ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
979 ogm_source, orig_node,
980 src, dst, tvlv_value,
981 tvlv_value_cont_len);
982 if (tvlv_handler)
983 batadv_tvlv_handler_free_ref(tvlv_handler);
984 tvlv_value = (uint8_t *)tvlv_value + tvlv_value_cont_len;
985 tvlv_value_len -= tvlv_value_cont_len;
986 }
987
988 if (!ogm_source)
989 return ret;
990
991 rcu_read_lock();
992 hlist_for_each_entry_rcu(tvlv_handler,
993 &bat_priv->tvlv.handler_list, list) {
994 if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
995 !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
996 tvlv_handler->ogm_handler(bat_priv, orig_node,
997 cifnotfound, NULL, 0);
998
999 tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
1000 }
1001 rcu_read_unlock();
1002
1003 return NET_RX_SUCCESS;
1004}
1005
1006/**
1007 * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
1008 * handlers
1009 * @bat_priv: the bat priv with all the soft interface information
1010 * @batadv_ogm_packet: ogm packet containing the tvlv containers
1011 * @orig_node: orig node emitting the ogm packet
1012 */
1013void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
1014 struct batadv_ogm_packet *batadv_ogm_packet,
1015 struct batadv_orig_node *orig_node)
1016{
1017 void *tvlv_value;
1018 uint16_t tvlv_value_len;
1019
1020 if (!batadv_ogm_packet)
1021 return;
1022
1023 tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
1024 if (!tvlv_value_len)
1025 return;
1026
1027 tvlv_value = batadv_ogm_packet + 1;
1028
1029 batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
1030 tvlv_value, tvlv_value_len);
1031}
1032
1033/**
1034 * batadv_tvlv_handler_register - register tvlv handler based on the provided
1035 * type and version (both need to match) for ogm tvlv payload and/or unicast
1036 * payload
1037 * @bat_priv: the bat priv with all the soft interface information
1038 * @optr: ogm tvlv handler callback function. This function receives the orig
1039 * node, flags and the tvlv content as argument to process.
1040 * @uptr: unicast tvlv handler callback function. This function receives the
1041 * source & destination of the unicast packet as well as the tvlv content
1042 * to process.
1043 * @type: tvlv handler type to be registered
1044 * @version: tvlv handler version to be registered
1045 * @flags: flags to enable or disable TVLV API behavior
1046 */
1047void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
1048 void (*optr)(struct batadv_priv *bat_priv,
1049 struct batadv_orig_node *orig,
1050 uint8_t flags,
1051 void *tvlv_value,
1052 uint16_t tvlv_value_len),
1053 int (*uptr)(struct batadv_priv *bat_priv,
1054 uint8_t *src, uint8_t *dst,
1055 void *tvlv_value,
1056 uint16_t tvlv_value_len),
1057 uint8_t type, uint8_t version, uint8_t flags)
1058{
1059 struct batadv_tvlv_handler *tvlv_handler;
1060
1061 tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
1062 if (tvlv_handler) {
1063 batadv_tvlv_handler_free_ref(tvlv_handler);
1064 return;
1065 }
1066
1067 tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
1068 if (!tvlv_handler)
1069 return;
1070
1071 tvlv_handler->ogm_handler = optr;
1072 tvlv_handler->unicast_handler = uptr;
1073 tvlv_handler->type = type;
1074 tvlv_handler->version = version;
1075 tvlv_handler->flags = flags;
1076 atomic_set(&tvlv_handler->refcount, 1);
1077 INIT_HLIST_NODE(&tvlv_handler->list);
1078
1079 spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
1080 hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
1081 spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
1082}
1083
1084/**
1085 * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
1086 * provided type and version (both need to match)
1087 * @bat_priv: the bat priv with all the soft interface information
1088 * @type: tvlv handler type to be unregistered
1089 * @version: tvlv handler version to be unregistered
1090 */
1091void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
1092 uint8_t type, uint8_t version)
1093{
1094 struct batadv_tvlv_handler *tvlv_handler;
1095
1096 tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
1097 if (!tvlv_handler)
1098 return;
1099
1100 batadv_tvlv_handler_free_ref(tvlv_handler);
1101 spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
1102 hlist_del_rcu(&tvlv_handler->list);
1103 spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
1104 batadv_tvlv_handler_free_ref(tvlv_handler);
1105}
1106
1107/**
1108 * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
1109 * specified host
1110 * @bat_priv: the bat priv with all the soft interface information
1111 * @src: source mac address of the unicast packet
1112 * @dst: destination mac address of the unicast packet
1113 * @type: tvlv type
1114 * @version: tvlv version
1115 * @tvlv_value: tvlv content
1116 * @tvlv_value_len: tvlv content length
1117 */
1118void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
1119 uint8_t *dst, uint8_t type, uint8_t version,
1120 void *tvlv_value, uint16_t tvlv_value_len)
1121{
1122 struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
1123 struct batadv_tvlv_hdr *tvlv_hdr;
1124 struct batadv_orig_node *orig_node;
1125 struct sk_buff *skb = NULL;
1126 unsigned char *tvlv_buff;
1127 unsigned int tvlv_len;
1128 ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
1129 bool ret = false;
1130
1131 orig_node = batadv_orig_hash_find(bat_priv, dst);
1132 if (!orig_node)
1133 goto out;
1134
1135 tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;
1136
1137 skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
1138 if (!skb)
1139 goto out;
1140
1141 skb->priority = TC_PRIO_CONTROL;
1142 skb_reserve(skb, ETH_HLEN);
1143 tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
1144 unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
Simon Wunderlicha40d9b02013-12-02 20:38:31 +01001145 unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
1146 unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
1147 unicast_tvlv_packet->ttl = BATADV_TTL;
Marek Lindneref261572013-04-23 21:39:57 +08001148 unicast_tvlv_packet->reserved = 0;
1149 unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
1150 unicast_tvlv_packet->align = 0;
Antonio Quartulli8fdd0152014-01-22 00:42:11 +01001151 ether_addr_copy(unicast_tvlv_packet->src, src);
1152 ether_addr_copy(unicast_tvlv_packet->dst, dst);
Marek Lindneref261572013-04-23 21:39:57 +08001153
1154 tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
1155 tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
1156 tvlv_hdr->version = version;
1157 tvlv_hdr->type = type;
1158 tvlv_hdr->len = htons(tvlv_value_len);
1159 tvlv_buff += sizeof(*tvlv_hdr);
1160 memcpy(tvlv_buff, tvlv_value, tvlv_value_len);
1161
1162 if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
1163 ret = true;
1164
1165out:
1166 if (skb && !ret)
1167 kfree_skb(skb);
1168 if (orig_node)
1169 batadv_orig_node_free_ref(orig_node);
1170}
1171
Antonio Quartullic018ad32013-06-04 12:11:39 +02001172/**
1173 * batadv_get_vid - extract the VLAN identifier from skb if any
1174 * @skb: the buffer containing the packet
1175 * @header_len: length of the batman header preceding the ethernet header
1176 *
1177 * If the packet embedded in the skb is vlan tagged this function returns the
1178 * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned.
1179 */
1180unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
1181{
1182 struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
1183 struct vlan_ethhdr *vhdr;
1184 unsigned short vid;
1185
1186 if (ethhdr->h_proto != htons(ETH_P_8021Q))
1187 return BATADV_NO_FLAGS;
1188
1189 if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
1190 return BATADV_NO_FLAGS;
1191
1192 vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
1193 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
1194 vid |= BATADV_VLAN_HAS_TAG;
1195
1196 return vid;
1197}
1198
Antonio Quartullieceb22a2013-11-16 12:03:51 +01001199/**
1200 * batadv_vlan_ap_isola_get - return the AP isolation status for the given vlan
1201 * @bat_priv: the bat priv with all the soft interface information
1202 * @vid: the VLAN identifier for which the AP isolation attributed as to be
1203 * looked up
1204 *
1205 * Returns true if AP isolation is on for the VLAN idenfied by vid, false
1206 * otherwise
1207 */
1208bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
1209{
1210 bool ap_isolation_enabled = false;
1211 struct batadv_softif_vlan *vlan;
1212
1213 /* if the AP isolation is requested on a VLAN, then check for its
1214 * setting in the proper VLAN private data structure
1215 */
1216 vlan = batadv_softif_vlan_get(bat_priv, vid);
1217 if (vlan) {
1218 ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
1219 batadv_softif_vlan_free_ref(vlan);
1220 }
1221
1222 return ap_isolation_enabled;
1223}
1224
Sven Eckelmannee11ad62012-05-16 20:23:19 +02001225static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
Marek Lindnerd419be12011-12-10 19:45:53 +08001226{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001227 struct batadv_algo_ops *bat_algo_ops;
Marek Lindnerd8cb54862012-04-18 17:16:39 +08001228 char *algo_name = (char *)val;
1229 size_t name_len = strlen(algo_name);
Marek Lindnerd419be12011-12-10 19:45:53 +08001230
Marek Lindner293c9c12013-04-27 16:22:28 +08001231 if (name_len > 0 && algo_name[name_len - 1] == '\n')
Marek Lindnerd8cb54862012-04-18 17:16:39 +08001232 algo_name[name_len - 1] = '\0';
1233
Sven Eckelmannee11ad62012-05-16 20:23:19 +02001234 bat_algo_ops = batadv_algo_get(algo_name);
Marek Lindnerd419be12011-12-10 19:45:53 +08001235 if (!bat_algo_ops) {
Marek Lindnerd8cb54862012-04-18 17:16:39 +08001236 pr_err("Routing algorithm '%s' is not supported\n", algo_name);
Marek Lindnerd419be12011-12-10 19:45:53 +08001237 return -EINVAL;
1238 }
1239
Marek Lindnerd8cb54862012-04-18 17:16:39 +08001240 return param_set_copystring(algo_name, kp);
Marek Lindnerd419be12011-12-10 19:45:53 +08001241}
1242
Sven Eckelmannee11ad62012-05-16 20:23:19 +02001243static const struct kernel_param_ops batadv_param_ops_ra = {
1244 .set = batadv_param_set_ra,
Marek Lindnerd419be12011-12-10 19:45:53 +08001245 .get = param_get_string,
1246};
1247
Sven Eckelmannee11ad62012-05-16 20:23:19 +02001248static struct kparam_string batadv_param_string_ra = {
Sven Eckelmann3193e8f2012-05-12 02:09:42 +02001249 .maxlen = sizeof(batadv_routing_algo),
1250 .string = batadv_routing_algo,
Marek Lindnerd419be12011-12-10 19:45:53 +08001251};
1252
Sven Eckelmannee11ad62012-05-16 20:23:19 +02001253module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
1254 0644);
1255module_init(batadv_init);
1256module_exit(batadv_exit);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001257
1258MODULE_LICENSE("GPL");
1259
Sven Eckelmann42d0b042012-06-03 22:19:17 +02001260MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
1261MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
1262MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
1263MODULE_VERSION(BATADV_SOURCE_VERSION);