/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/crc32c.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>
#include "main.h"
#include "sysfs.h"
#include "debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
#include "soft-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "gateway_common.h"
#include "hash.h"
#include "bat_algo.h"
#include "network-coding.h"
#include "fragmentation.h"

/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;
static int (*batadv_rx_handler[256])(struct sk_buff *,
                                     struct batadv_hard_iface *);
char batadv_routing_algo[20] = "BATMAN_IV";
static struct hlist_head batadv_algo_list;

unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);

static int __init batadv_init(void)
{
        INIT_LIST_HEAD(&batadv_hardif_list);
        INIT_HLIST_HEAD(&batadv_algo_list);

        batadv_recv_handler_init();

        batadv_iv_init();
        batadv_nc_init();

        batadv_event_workqueue = create_singlethread_workqueue("bat_events");

        if (!batadv_event_workqueue)
                return -ENOMEM;

        batadv_socket_init();
        batadv_debugfs_init();

        register_netdevice_notifier(&batadv_hard_if_notifier);
        rtnl_link_register(&batadv_link_ops);

        pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
                BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

        return 0;
}

static void __exit batadv_exit(void)
{
        batadv_debugfs_destroy();
        rtnl_link_unregister(&batadv_link_ops);
        unregister_netdevice_notifier(&batadv_hard_if_notifier);
        batadv_hardif_remove_interfaces();

        flush_workqueue(batadv_event_workqueue);
        destroy_workqueue(batadv_event_workqueue);
        batadv_event_workqueue = NULL;

        rcu_barrier();
}

int batadv_mesh_init(struct net_device *soft_iface)
{
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        int ret;

        spin_lock_init(&bat_priv->forw_bat_list_lock);
        spin_lock_init(&bat_priv->forw_bcast_list_lock);
        spin_lock_init(&bat_priv->tt.changes_list_lock);
        spin_lock_init(&bat_priv->tt.req_list_lock);
        spin_lock_init(&bat_priv->tt.roam_list_lock);
        spin_lock_init(&bat_priv->tt.last_changeset_lock);
        spin_lock_init(&bat_priv->gw.list_lock);
        spin_lock_init(&bat_priv->tvlv.container_list_lock);
        spin_lock_init(&bat_priv->tvlv.handler_list_lock);

        INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
        INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
        INIT_HLIST_HEAD(&bat_priv->gw.list);
        INIT_LIST_HEAD(&bat_priv->tt.changes_list);
        INIT_LIST_HEAD(&bat_priv->tt.req_list);
        INIT_LIST_HEAD(&bat_priv->tt.roam_list);
        INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
        INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);

        ret = batadv_originator_init(bat_priv);
        if (ret < 0)
                goto err;

        ret = batadv_tt_init(bat_priv);
        if (ret < 0)
                goto err;

        batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
                            BATADV_NULL_IFINDEX);

        ret = batadv_bla_init(bat_priv);
        if (ret < 0)
                goto err;

        ret = batadv_dat_init(bat_priv);
        if (ret < 0)
                goto err;

        ret = batadv_nc_mesh_init(bat_priv);
        if (ret < 0)
                goto err;

        batadv_gw_init(bat_priv);

        atomic_set(&bat_priv->gw.reselect, 0);
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

        return 0;

err:
        batadv_mesh_free(soft_iface);
        return ret;
}

void batadv_mesh_free(struct net_device *soft_iface)
{
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);

        atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

        batadv_purge_outstanding_packets(bat_priv, NULL);

        batadv_gw_node_purge(bat_priv);
        batadv_nc_mesh_free(bat_priv);
        batadv_dat_free(bat_priv);
        batadv_bla_free(bat_priv);

        /* Free the TT and the originator tables only after having terminated
         * all the other depending components which may use these structures for
         * their purposes.
         */
        batadv_tt_free(bat_priv);

        /* Since the originator table clean up routine is accessing the TT
         * tables as well, it has to be invoked after the TT tables have been
         * freed and marked as empty. This ensures that no cleanup RCU callbacks
         * accessing the TT data are scheduled for later execution.
         */
        batadv_originator_free(bat_priv);

        batadv_gw_free(bat_priv);

        free_percpu(bat_priv->bat_counters);
        bat_priv->bat_counters = NULL;

        atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}

/**
 * batadv_is_my_mac - check if the given mac address belongs to any of the real
 * interfaces in the current mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address to check
 */
int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
{
        const struct batadv_hard_iface *hard_iface;

        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->if_status != BATADV_IF_ACTIVE)
                        continue;

                if (hard_iface->soft_iface != bat_priv->soft_iface)
                        continue;

                if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
                        rcu_read_unlock();
                        return 1;
                }
        }
        rcu_read_unlock();
        return 0;
}

/**
 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
 * function that requires the primary interface
 * @seq: debugfs table seq_file struct
 *
 * Returns primary interface if found or NULL otherwise.
 */
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
        struct batadv_hard_iface *primary_if;

        primary_if = batadv_primary_if_get_selected(bat_priv);

        if (!primary_if) {
                seq_printf(seq,
                           "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
                           net_dev->name);
                goto out;
        }

        if (primary_if->if_status == BATADV_IF_ACTIVE)
                goto out;

        seq_printf(seq,
                   "BATMAN mesh %s disabled - primary interface not active\n",
                   net_dev->name);
        batadv_hardif_free_ref(primary_if);
        primary_if = NULL;

out:
        return primary_if;
}

/**
 * batadv_skb_set_priority - sets skb priority according to packet content
 * @skb: the packet to be sent
 * @offset: offset to the packet content
 *
 * This function sets a value between 256 and 263 (802.1d priority), which
 * can be interpreted by cfg80211 or other drivers.
 */
void batadv_skb_set_priority(struct sk_buff *skb, int offset)
{
        struct iphdr ip_hdr_tmp, *ip_hdr;
        struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
        struct ethhdr ethhdr_tmp, *ethhdr;
        struct vlan_ethhdr *vhdr, vhdr_tmp;
        u32 prio;

        /* already set, do nothing */
        if (skb->priority >= 256 && skb->priority <= 263)
                return;

        ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
        if (!ethhdr)
                return;

        switch (ethhdr->h_proto) {
        case htons(ETH_P_8021Q):
                vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
                                          sizeof(*vhdr), &vhdr_tmp);
                if (!vhdr)
                        return;
                prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
                prio = prio >> VLAN_PRIO_SHIFT;
                break;
        case htons(ETH_P_IP):
                ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
                                            sizeof(*ip_hdr), &ip_hdr_tmp);
                if (!ip_hdr)
                        return;
                prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
                break;
        case htons(ETH_P_IPV6):
                ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
                                             sizeof(*ip6_hdr), &ip6_hdr_tmp);
                if (!ip6_hdr)
                        return;
                prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
                break;
        default:
                return;
        }

        skb->priority = prio + 256;
}

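/* Worked example (illustrative, not part of the original source): for an
 * IPv4 packet whose DS field carries CS6 (0xc0), the computation above
 * yields prio = (0xc0 & 0xfc) >> 5 = 6, so the packet leaves this function
 * with skb->priority = 262, i.e. 802.1d priority 6 encoded in the 256..263
 * range mentioned in the kernel-doc comment.
 */
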
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
                                        struct batadv_hard_iface *recv_if)
{
        return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *ptype,
                           struct net_device *orig_dev)
{
        struct batadv_priv *bat_priv;
        struct batadv_ogm_packet *batadv_ogm_packet;
        struct batadv_hard_iface *hard_iface;
        uint8_t idx;
        int ret;

        hard_iface = container_of(ptype, struct batadv_hard_iface,
                                  batman_adv_ptype);
        skb = skb_share_check(skb, GFP_ATOMIC);

        /* skb was released by skb_share_check() */
        if (!skb)
                goto err_out;

        /* packet should hold at least type and version */
        if (unlikely(!pskb_may_pull(skb, 2)))
                goto err_free;

        /* expect a valid ethernet header here. */
        if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
                goto err_free;

        if (!hard_iface->soft_iface)
                goto err_free;

        bat_priv = netdev_priv(hard_iface->soft_iface);

        if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
                goto err_free;

        /* discard frames on not active interfaces */
        if (hard_iface->if_status != BATADV_IF_ACTIVE)
                goto err_free;

        batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

        if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Drop packet: incompatible batman version (%i)\n",
                           batadv_ogm_packet->header.version);
                goto err_free;
        }

        /* all receive handlers return whether they received or reused
         * the supplied skb. if not, we have to free the skb.
         */
        idx = batadv_ogm_packet->header.packet_type;
        ret = (*batadv_rx_handler[idx])(skb, hard_iface);

        if (ret == NET_RX_DROP)
                kfree_skb(skb);

        /* return NET_RX_SUCCESS in any case as we
         * most probably dropped the packet for
         * routing-logical reasons.
         */
        return NET_RX_SUCCESS;

err_free:
        kfree_skb(skb);
err_out:
        return NET_RX_DROP;
}

static void batadv_recv_handler_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
                batadv_rx_handler[i] = batadv_recv_unhandled_packet;

        for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
                batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;

        /* compile time checks for struct member offsets */
        BUILD_BUG_ON(offsetof(struct batadv_unicast_4addr_packet, src) != 10);
        BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4);
        BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4);
        BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4);
        BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, dst) != 4);
        BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, dst) != 4);

        /* broadcast packet */
        batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;

        /* unicast packets ... */
        /* unicast with 4 addresses packet */
        batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
        /* unicast packet */
        batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
        /* unicast tvlv packet */
        batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
        /* batman icmp packet */
        batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
        /* Fragmented packets */
        batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
}

int
batadv_recv_handler_register(uint8_t packet_type,
                             int (*recv_handler)(struct sk_buff *,
                                                 struct batadv_hard_iface *))
{
        int (*curr)(struct sk_buff *,
                    struct batadv_hard_iface *);
        curr = batadv_rx_handler[packet_type];

        if ((curr != batadv_recv_unhandled_packet) &&
            (curr != batadv_recv_unhandled_unicast_packet))
                return -EBUSY;

        batadv_rx_handler[packet_type] = recv_handler;
        return 0;
}

void batadv_recv_handler_unregister(uint8_t packet_type)
{
        batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}

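/* Usage sketch (illustrative, not taken from this file; the packet type and
 * callback name are placeholders): a component that defines its own batman-adv
 * packet type claims a handler slot once during setup and releases it again
 * on teardown.
 *
 *      static int my_recv_packet(struct sk_buff *skb,
 *                                struct batadv_hard_iface *recv_if);
 *
 *      ret = batadv_recv_handler_register(MY_PACKET_TYPE, my_recv_packet);
 *      if (ret < 0)
 *              return ret;
 *      ...
 *      batadv_recv_handler_unregister(MY_PACKET_TYPE);
 *
 * The callback returns NET_RX_SUCCESS when it consumed or reused the skb;
 * on NET_RX_DROP the dispatcher in batadv_batman_skb_recv() frees it.
 */
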
static struct batadv_algo_ops *batadv_algo_get(char *name)
{
        struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;

        hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
                if (strcmp(bat_algo_ops_tmp->name, name) != 0)
                        continue;

                bat_algo_ops = bat_algo_ops_tmp;
                break;
        }

        return bat_algo_ops;
}

int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
        struct batadv_algo_ops *bat_algo_ops_tmp;
        int ret;

        bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
        if (bat_algo_ops_tmp) {
                pr_info("Trying to register already registered routing algorithm: %s\n",
                        bat_algo_ops->name);
                ret = -EEXIST;
                goto out;
        }

        /* all algorithms must implement all ops (for now) */
        if (!bat_algo_ops->bat_iface_enable ||
            !bat_algo_ops->bat_iface_disable ||
            !bat_algo_ops->bat_iface_update_mac ||
            !bat_algo_ops->bat_primary_iface_set ||
            !bat_algo_ops->bat_ogm_schedule ||
            !bat_algo_ops->bat_ogm_emit) {
                pr_info("Routing algo '%s' does not implement required ops\n",
                        bat_algo_ops->name);
                ret = -EINVAL;
                goto out;
        }

        INIT_HLIST_NODE(&bat_algo_ops->list);
        hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
        ret = 0;

out:
        return ret;
}

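/* Registration sketch (illustrative only; the ops structure and callback
 * names are placeholders, not the actual B.A.T.M.A.N. IV implementation):
 * a routing algorithm fills in all mandatory ops checked above and announces
 * itself once at module init time.
 *
 *      static struct batadv_algo_ops my_algo_ops = {
 *              .name                   = "MY_ALGO",
 *              .bat_iface_enable       = my_iface_enable,
 *              .bat_iface_disable      = my_iface_disable,
 *              .bat_iface_update_mac   = my_iface_update_mac,
 *              .bat_primary_iface_set  = my_primary_iface_set,
 *              .bat_ogm_schedule       = my_ogm_schedule,
 *              .bat_ogm_emit           = my_ogm_emit,
 *      };
 *
 *      ret = batadv_algo_register(&my_algo_ops);
 *
 * A missing op makes the call fail with -EINVAL; registering an already
 * known name fails with -EEXIST.
 */
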
int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
{
        struct batadv_algo_ops *bat_algo_ops;
        int ret = -EINVAL;

        bat_algo_ops = batadv_algo_get(name);
        if (!bat_algo_ops)
                goto out;

        bat_priv->bat_algo_ops = bat_algo_ops;
        ret = 0;

out:
        return ret;
}

int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
        struct batadv_algo_ops *bat_algo_ops;

        seq_puts(seq, "Available routing algorithms:\n");

        hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
                seq_printf(seq, "%s\n", bat_algo_ops->name);
        }

        return 0;
}

/**
 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
 * the header
 * @skb: skb pointing to fragmented socket buffers
 * @payload_ptr: Pointer to position inside the head buffer of the skb
 * marking the start of the data to be CRC'ed
 *
 * payload_ptr must always point to an address in the skb head buffer and not
 * to a fragment.
 */
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
{
        u32 crc = 0;
        unsigned int from;
        unsigned int to = skb->len;
        struct skb_seq_state st;
        const u8 *data;
        unsigned int len;
        unsigned int consumed = 0;

        from = (unsigned int)(payload_ptr - skb->data);

        skb_prepare_seq_read(skb, from, to, &st);
        while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                crc = crc32c(crc, data, len);
                consumed += len;
        }

        return htonl(crc);
}

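/* Usage sketch (illustrative; hdr_len stands for the size of whatever
 * batman-adv header precedes the checksummed payload): the CRC covers
 * everything from payload_ptr up to the end of the skb, paged fragments
 * included, while the bytes in front of payload_ptr are skipped.
 *
 *      __be32 crc = batadv_skb_crc32(skb, skb->data + hdr_len);
 *
 * As noted above, payload_ptr has to stay within the linear head buffer.
 */
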
/**
 * batadv_tvlv_handler_free_ref - decrement the tvlv handler refcounter and
 * possibly free it
 * @tvlv_handler: the tvlv handler to free
 */
static void
batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
{
        if (atomic_dec_and_test(&tvlv_handler->refcount))
                kfree_rcu(tvlv_handler, rcu);
}

/**
 * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
 * based on the provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to look for
 * @version: tvlv handler version to look for
 *
 * Returns tvlv handler if found or NULL otherwise.
 */
static struct batadv_tvlv_handler
*batadv_tvlv_handler_get(struct batadv_priv *bat_priv,
                         uint8_t type, uint8_t version)
{
        struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;

        rcu_read_lock();
        hlist_for_each_entry_rcu(tvlv_handler_tmp,
                                 &bat_priv->tvlv.handler_list, list) {
                if (tvlv_handler_tmp->type != type)
                        continue;

                if (tvlv_handler_tmp->version != version)
                        continue;

                if (!atomic_inc_not_zero(&tvlv_handler_tmp->refcount))
                        continue;

                tvlv_handler = tvlv_handler_tmp;
                break;
        }
        rcu_read_unlock();

        return tvlv_handler;
}

/**
 * batadv_tvlv_container_free_ref - decrement the tvlv container refcounter and
 * possibly free it
 * @tvlv: the tvlv container to free
 */
static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
{
        if (atomic_dec_and_test(&tvlv->refcount))
                kfree(tvlv);
}

/**
 * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
 * list based on the provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type to look for
 * @version: tvlv container version to look for
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 *
 * Returns tvlv container if found or NULL otherwise.
 */
static struct batadv_tvlv_container
*batadv_tvlv_container_get(struct batadv_priv *bat_priv,
                           uint8_t type, uint8_t version)
{
        struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;

        hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
                if (tvlv_tmp->tvlv_hdr.type != type)
                        continue;

                if (tvlv_tmp->tvlv_hdr.version != version)
                        continue;

                if (!atomic_inc_not_zero(&tvlv_tmp->refcount))
                        continue;

                tvlv = tvlv_tmp;
                break;
        }

        return tvlv;
}

/**
 * batadv_tvlv_container_list_size - calculate the size of the tvlv container
 * list entries
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 *
 * Returns size of all currently registered tvlv containers in bytes.
 */
static uint16_t batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
{
        struct batadv_tvlv_container *tvlv;
        uint16_t tvlv_len = 0;

        hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
                tvlv_len += sizeof(struct batadv_tvlv_hdr);
                tvlv_len += ntohs(tvlv->tvlv_hdr.len);
        }

        return tvlv_len;
}

/**
 * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
 * list
 * @tvlv: the to be removed tvlv container
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 */
static void batadv_tvlv_container_remove(struct batadv_tvlv_container *tvlv)
{
        if (!tvlv)
                return;

        hlist_del(&tvlv->list);

        /* first call to decrement the counter, second call to free */
        batadv_tvlv_container_free_ref(tvlv);
        batadv_tvlv_container_free_ref(tvlv);
}

/**
 * batadv_tvlv_container_unregister - unregister tvlv container based on the
 * provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type to unregister
 * @version: tvlv container version to unregister
 */
void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
                                      uint8_t type, uint8_t version)
{
        struct batadv_tvlv_container *tvlv;

        spin_lock_bh(&bat_priv->tvlv.container_list_lock);
        tvlv = batadv_tvlv_container_get(bat_priv, type, version);
        batadv_tvlv_container_remove(tvlv);
        spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}

/**
 * batadv_tvlv_container_register - register tvlv type, version and content
 * to be propagated with each (primary interface) OGM
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type
 * @version: tvlv container version
 * @tvlv_value: tvlv container content
 * @tvlv_value_len: tvlv container content length
 *
 * If a container of the same type and version was already registered the new
 * content is going to replace the old one.
 */
void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
                                    uint8_t type, uint8_t version,
                                    void *tvlv_value, uint16_t tvlv_value_len)
{
        struct batadv_tvlv_container *tvlv_old, *tvlv_new;

        if (!tvlv_value)
                tvlv_value_len = 0;

        tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
        if (!tvlv_new)
                return;

        tvlv_new->tvlv_hdr.version = version;
        tvlv_new->tvlv_hdr.type = type;
        tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);

        memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
        INIT_HLIST_NODE(&tvlv_new->list);
        atomic_set(&tvlv_new->refcount, 1);

        spin_lock_bh(&bat_priv->tvlv.container_list_lock);
        tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
        batadv_tvlv_container_remove(tvlv_old);
        hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
        spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}

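/* Usage sketch (illustrative; the struct and type value are placeholders):
 * a feature that wants to announce per-node data with every OGM registers a
 * container once and simply registers again whenever the content changes,
 * since a new registration replaces an existing container of the same type
 * and version.
 *
 *      struct my_tvlv_data data = { ... };
 *
 *      batadv_tvlv_container_register(bat_priv, MY_TVLV_TYPE, 1,
 *                                     &data, sizeof(data));
 *      ...
 *      batadv_tvlv_container_unregister(bat_priv, MY_TVLV_TYPE, 1);
 *
 * The content is copied into the container, so the caller may reuse or free
 * its own buffer afterwards.
 */
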
/**
 * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
 * requested packet size
 * @packet_buff: packet buffer
 * @packet_buff_len: packet buffer size
 * @min_packet_len: requested packet minimum size
 * @additional_packet_len: requested additional packet size on top of minimum
 * size
 *
 * Returns true if the packet buffer could be changed to the requested size,
 * false otherwise.
 */
static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
                                            int *packet_buff_len,
                                            int min_packet_len,
                                            int additional_packet_len)
{
        unsigned char *new_buff;

        new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);

        /* keep old buffer if kmalloc should fail */
        if (new_buff) {
                memcpy(new_buff, *packet_buff, min_packet_len);
                kfree(*packet_buff);
                *packet_buff = new_buff;
                *packet_buff_len = min_packet_len + additional_packet_len;
                return true;
        }

        return false;
}

/**
 * batadv_tvlv_container_ogm_append - append tvlv container content to given
 * OGM packet buffer
 * @bat_priv: the bat priv with all the soft interface information
 * @packet_buff: ogm packet buffer
 * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
 * content
 * @packet_min_len: ogm header size to be preserved for the OGM itself
 *
 * The ogm packet might be enlarged or shrunk depending on the current size
 * and the size of the to-be-appended tvlv containers.
 *
 * Returns size of all appended tvlv containers in bytes.
 */
uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
                                          unsigned char **packet_buff,
                                          int *packet_buff_len,
                                          int packet_min_len)
{
        struct batadv_tvlv_container *tvlv;
        struct batadv_tvlv_hdr *tvlv_hdr;
        uint16_t tvlv_value_len;
        void *tvlv_value;
        bool ret;

        spin_lock_bh(&bat_priv->tvlv.container_list_lock);
        tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);

        ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
                                              packet_min_len, tvlv_value_len);

        if (!ret)
                goto end;

        if (!tvlv_value_len)
                goto end;

        tvlv_value = (*packet_buff) + packet_min_len;

        hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
                tvlv_hdr = tvlv_value;
                tvlv_hdr->type = tvlv->tvlv_hdr.type;
                tvlv_hdr->version = tvlv->tvlv_hdr.version;
                tvlv_hdr->len = tvlv->tvlv_hdr.len;
                tvlv_value = tvlv_hdr + 1;
                memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
                tvlv_value = (uint8_t *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
        }

end:
        spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
        return tvlv_value_len;
}

/**
 * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
 * appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @tvlv_handler: tvlv callback function handling the tvlv content
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Returns success if handler was not found or the return value of the handler
 * callback.
 */
static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
                                    struct batadv_tvlv_handler *tvlv_handler,
                                    bool ogm_source,
                                    struct batadv_orig_node *orig_node,
                                    uint8_t *src, uint8_t *dst,
                                    void *tvlv_value, uint16_t tvlv_value_len)
{
        if (!tvlv_handler)
                return NET_RX_SUCCESS;

        if (ogm_source) {
                if (!tvlv_handler->ogm_handler)
                        return NET_RX_SUCCESS;

                if (!orig_node)
                        return NET_RX_SUCCESS;

                tvlv_handler->ogm_handler(bat_priv, orig_node,
                                          BATADV_NO_FLAGS,
                                          tvlv_value, tvlv_value_len);
                tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
        } else {
                if (!src)
                        return NET_RX_SUCCESS;

                if (!dst)
                        return NET_RX_SUCCESS;

                if (!tvlv_handler->unicast_handler)
                        return NET_RX_SUCCESS;

                return tvlv_handler->unicast_handler(bat_priv, src,
                                                     dst, tvlv_value,
                                                     tvlv_value_len);
        }

        return NET_RX_SUCCESS;
}

/**
 * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
 * appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Returns success when processing an OGM or the return value of all called
 * handler callbacks.
 */
int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
                                   bool ogm_source,
                                   struct batadv_orig_node *orig_node,
                                   uint8_t *src, uint8_t *dst,
                                   void *tvlv_value, uint16_t tvlv_value_len)
{
        struct batadv_tvlv_handler *tvlv_handler;
        struct batadv_tvlv_hdr *tvlv_hdr;
        uint16_t tvlv_value_cont_len;
        uint8_t cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
        int ret = NET_RX_SUCCESS;

        while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
                tvlv_hdr = tvlv_value;
                tvlv_value_cont_len = ntohs(tvlv_hdr->len);
                tvlv_value = tvlv_hdr + 1;
                tvlv_value_len -= sizeof(*tvlv_hdr);

                if (tvlv_value_cont_len > tvlv_value_len)
                        break;

                tvlv_handler = batadv_tvlv_handler_get(bat_priv,
                                                       tvlv_hdr->type,
                                                       tvlv_hdr->version);

                ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
                                                ogm_source, orig_node,
                                                src, dst, tvlv_value,
                                                tvlv_value_cont_len);
                if (tvlv_handler)
                        batadv_tvlv_handler_free_ref(tvlv_handler);
                tvlv_value = (uint8_t *)tvlv_value + tvlv_value_cont_len;
                tvlv_value_len -= tvlv_value_cont_len;
        }

        if (!ogm_source)
                return ret;

        rcu_read_lock();
        hlist_for_each_entry_rcu(tvlv_handler,
                                 &bat_priv->tvlv.handler_list, list) {
                if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
                    !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
                        tvlv_handler->ogm_handler(bat_priv, orig_node,
                                                  cifnotfound, NULL, 0);

                tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
        }
        rcu_read_unlock();

        return NET_RX_SUCCESS;
}

/**
 * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
 * handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @batadv_ogm_packet: ogm packet containing the tvlv containers
 * @orig_node: orig node emitting the ogm packet
 */
void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
                             struct batadv_ogm_packet *batadv_ogm_packet,
                             struct batadv_orig_node *orig_node)
{
        void *tvlv_value;
        uint16_t tvlv_value_len;

        if (!batadv_ogm_packet)
                return;

        tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
        if (!tvlv_value_len)
                return;

        tvlv_value = batadv_ogm_packet + 1;

        batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
                                       tvlv_value, tvlv_value_len);
}

/**
 * batadv_tvlv_handler_register - register tvlv handler based on the provided
 * type and version (both need to match) for ogm tvlv payload and/or unicast
 * payload
 * @bat_priv: the bat priv with all the soft interface information
 * @optr: ogm tvlv handler callback function. This function receives the orig
 * node, flags and the tvlv content as argument to process.
 * @uptr: unicast tvlv handler callback function. This function receives the
 * source & destination of the unicast packet as well as the tvlv content
 * to process.
 * @type: tvlv handler type to be registered
 * @version: tvlv handler version to be registered
 * @flags: flags to enable or disable TVLV API behavior
 */
void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
                                  void (*optr)(struct batadv_priv *bat_priv,
                                               struct batadv_orig_node *orig,
                                               uint8_t flags,
                                               void *tvlv_value,
                                               uint16_t tvlv_value_len),
                                  int (*uptr)(struct batadv_priv *bat_priv,
                                              uint8_t *src, uint8_t *dst,
                                              void *tvlv_value,
                                              uint16_t tvlv_value_len),
                                  uint8_t type, uint8_t version, uint8_t flags)
{
        struct batadv_tvlv_handler *tvlv_handler;

        tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
        if (tvlv_handler) {
                batadv_tvlv_handler_free_ref(tvlv_handler);
                return;
        }

        tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
        if (!tvlv_handler)
                return;

        tvlv_handler->ogm_handler = optr;
        tvlv_handler->unicast_handler = uptr;
        tvlv_handler->type = type;
        tvlv_handler->version = version;
        tvlv_handler->flags = flags;
        atomic_set(&tvlv_handler->refcount, 1);
        INIT_HLIST_NODE(&tvlv_handler->list);

        spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
        hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
        spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
}

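/* Usage sketch (illustrative; callback names and the type value are
 * placeholders): a feature interested in a tvlv type registers an OGM
 * handler, a unicast handler, or both. Passing
 * BATADV_TVLV_HANDLER_OGM_CIFNOTFND in flags asks the OGM path to also call
 * the handler, with that flag set, when an originator did not announce a
 * matching container (see batadv_tvlv_containers_process() above).
 *
 *      batadv_tvlv_handler_register(bat_priv, my_ogm_handler,
 *                                   my_unicast_handler, MY_TVLV_TYPE, 1,
 *                                   BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
 */
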
/**
 * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
 * provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to be unregistered
 * @version: tvlv handler version to be unregistered
 */
void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
                                    uint8_t type, uint8_t version)
{
        struct batadv_tvlv_handler *tvlv_handler;

        tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
        if (!tvlv_handler)
                return;

        batadv_tvlv_handler_free_ref(tvlv_handler);
        spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
        hlist_del_rcu(&tvlv_handler->list);
        spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
        batadv_tvlv_handler_free_ref(tvlv_handler);
}

/**
 * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
 * specified host
 * @bat_priv: the bat priv with all the soft interface information
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @type: tvlv type
 * @version: tvlv version
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 */
void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
                              uint8_t *dst, uint8_t type, uint8_t version,
                              void *tvlv_value, uint16_t tvlv_value_len)
{
        struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
        struct batadv_tvlv_hdr *tvlv_hdr;
        struct batadv_orig_node *orig_node;
        struct sk_buff *skb = NULL;
        unsigned char *tvlv_buff;
        unsigned int tvlv_len;
        ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
        bool ret = false;

        orig_node = batadv_orig_hash_find(bat_priv, dst);
        if (!orig_node)
                goto out;

        tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;

        skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
        if (!skb)
                goto out;

        skb->priority = TC_PRIO_CONTROL;
        skb_reserve(skb, ETH_HLEN);
        tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
        unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
        unicast_tvlv_packet->header.packet_type = BATADV_UNICAST_TVLV;
        unicast_tvlv_packet->header.version = BATADV_COMPAT_VERSION;
        unicast_tvlv_packet->header.ttl = BATADV_TTL;
        unicast_tvlv_packet->reserved = 0;
        unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
        unicast_tvlv_packet->align = 0;
        memcpy(unicast_tvlv_packet->src, src, ETH_ALEN);
        memcpy(unicast_tvlv_packet->dst, dst, ETH_ALEN);

        tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
        tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
        tvlv_hdr->version = version;
        tvlv_hdr->type = type;
        tvlv_hdr->len = htons(tvlv_value_len);
        tvlv_buff += sizeof(*tvlv_hdr);
        memcpy(tvlv_buff, tvlv_value, tvlv_value_len);

        if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
                ret = true;

out:
        if (skb && !ret)
                kfree_skb(skb);
        if (orig_node)
                batadv_orig_node_free_ref(orig_node);
}

static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
        struct batadv_algo_ops *bat_algo_ops;
        char *algo_name = (char *)val;
        size_t name_len = strlen(algo_name);

        if (name_len > 0 && algo_name[name_len - 1] == '\n')
                algo_name[name_len - 1] = '\0';

        bat_algo_ops = batadv_algo_get(algo_name);
        if (!bat_algo_ops) {
                pr_err("Routing algorithm '%s' is not supported\n", algo_name);
                return -EINVAL;
        }

        return param_set_copystring(algo_name, kp);
}

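/* The routing_algo module parameter defined below selects the default
 * algorithm used for newly created batman-adv interfaces, e.g. (illustrative
 * invocation):
 *
 *      modprobe batman_adv routing_algo=BATMAN_IV
 *
 * batadv_param_set_ra() rejects any name that no registered algorithm claims.
 */
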
static const struct kernel_param_ops batadv_param_ops_ra = {
        .set = batadv_param_set_ra,
        .get = param_get_string,
};

static struct kparam_string batadv_param_string_ra = {
        .maxlen = sizeof(batadv_routing_algo),
        .string = batadv_routing_algo,
};

module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
                0644);
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);