blob: d56d6b2e19241fb72e993fc79a1e5c954a3c9acb [file] [log] [blame]
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00002 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000018 */
19
20#include "main.h"
21#include "bat_sysfs.h"
22#include "bat_debugfs.h"
23#include "routing.h"
24#include "send.h"
25#include "originator.h"
26#include "soft-interface.h"
27#include "icmp_socket.h"
28#include "translation-table.h"
29#include "hard-interface.h"
30#include "gateway_client.h"
Simon Wunderlich23721382012-01-22 20:00:19 +010031#include "bridge_loop_avoidance.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000032#include "vis.h"
33#include "hash.h"
Marek Lindner1c280472011-11-28 17:40:17 +080034#include "bat_algo.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000035
Sven Eckelmannc3caf512011-05-03 11:51:38 +020036
/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;

/* dispatch table for incoming batman packets, indexed by the 8-bit
 * packet_type header field; every unused slot points at
 * recv_unhandled_packet (see recv_handler_init()), so no bounds or
 * NULL check is needed at dispatch time
 */
static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *);

/* name of the routing algorithm selected through the "routing_algo"
 * module parameter (see param_ops_ra / __param_string_ra below)
 */
char batadv_routing_algo[20] = "BATMAN_IV";

/* list of all bat_algo_ops added via batadv_algo_register() */
static struct hlist_head bat_algo_list;

/* Ethernet broadcast address used as destination for batman broadcasts */
unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/* single-threaded workqueue all deferred batman-adv work is queued on */
struct workqueue_struct *batadv_event_workqueue;

static void recv_handler_init(void);
50
/* module entry point: set up global state, the event workqueue and the
 * debug interfaces, then start listening for hard-interface events.
 * Returns 0 on success or -ENOMEM if the workqueue cannot be created.
 */
static int __init batman_init(void)
{
	INIT_LIST_HEAD(&batadv_hardif_list);
	INIT_HLIST_HEAD(&bat_algo_list);

	/* fill the packet dispatch table before any algo/iface can
	 * deliver packets
	 */
	recv_handler_init();

	/* registers the default BATMAN_IV routing algorithm
	 * (NOTE(review): return value is ignored here — registration
	 * failure would leave no algorithm available; confirm intended)
	 */
	batadv_iv_init();

	/* the name should not be longer than 10 chars - see
	 * http://lwn.net/Articles/23634/
	 */
	batadv_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!batadv_event_workqueue)
		return -ENOMEM;

	batadv_socket_init();
	batadv_debugfs_init();

	/* from here on hard-interface add/remove events are processed */
	register_netdevice_notifier(&batadv_hard_if_notifier);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		SOURCE_VERSION, COMPAT_VERSION);

	return 0;
}
78
/* module exit point: tear down in the reverse order of batman_init() —
 * stop new events first, then drain outstanding work, then wait for all
 * pending RCU callbacks so no batman-adv memory is freed after unload
 */
static void __exit batman_exit(void)
{
	batadv_debugfs_destroy();
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	/* drain queued work before destroying the queue */
	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	/* wait for outstanding call_rcu() callbacks to finish */
	rcu_barrier();
}
91
/* initialize the per-soft-interface mesh state hanging off netdev_priv():
 * locks and lists first (so the error path may safely tear everything
 * down), then the originator/TT/vis/bla subsystems in order.
 * Returns 0 on success or the negative errno of the failing subsystem;
 * on error all partially initialized state is released via
 * batadv_mesh_free().
 */
int batadv_mesh_init(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt_changes_list_lock);
	spin_lock_init(&bat_priv->tt_req_list_lock);
	spin_lock_init(&bat_priv->tt_roam_list_lock);
	spin_lock_init(&bat_priv->tt_buff_lock);
	spin_lock_init(&bat_priv->gw_list_lock);
	spin_lock_init(&bat_priv->vis_hash_lock);
	spin_lock_init(&bat_priv->vis_list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw_list);
	INIT_LIST_HEAD(&bat_priv->tt_changes_list);
	INIT_LIST_HEAD(&bat_priv->tt_req_list);
	INIT_LIST_HEAD(&bat_priv->tt_roam_list);

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	/* announce the soft interface's own MAC in the local
	 * translation table (NULL_IFINDEX: not tied to a real slave)
	 */
	batadv_tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX);

	ret = batadv_vis_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	atomic_set(&bat_priv->gw_reselect, 0);
	/* mark the mesh usable only after everything above succeeded */
	atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);

	return 0;

err:
	batadv_mesh_free(soft_iface);
	return ret;
}
141
/* release all mesh state of a soft interface, in the reverse order of
 * batadv_mesh_init(); also used as the error-path cleanup of that
 * function, so every subsystem free below must tolerate a partially
 * initialized bat_priv
 */
void batadv_mesh_free(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	/* signal other contexts that the mesh is going away */
	atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING);

	/* NULL: purge packets queued for all interfaces */
	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_vis_quit(bat_priv);

	batadv_gw_node_purge(bat_priv);
	batadv_originator_free(bat_priv);

	batadv_tt_free(bat_priv);

	batadv_bla_free(bat_priv);

	/* counters are allocated elsewhere (soft-interface setup);
	 * free_percpu(NULL) is a no-op, so this is safe either way
	 */
	free_percpu(bat_priv->bat_counters);

	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
}
163
/* take a module reference so batman-adv cannot be unloaded while in use;
 * NOTE(review): the try_module_get() result is deliberately ignored —
 * confirm callers never race with module removal
 */
void batadv_inc_module_count(void)
{
	try_module_get(THIS_MODULE);
}
168
/* drop the module reference taken by batadv_inc_module_count() */
void batadv_dec_module_count(void)
{
	module_put(THIS_MODULE);
}
173
Sven Eckelmann3193e8f2012-05-12 02:09:42 +0200174int batadv_is_my_mac(const uint8_t *addr)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000175{
Sven Eckelmann747e4222011-05-14 23:14:50 +0200176 const struct hard_iface *hard_iface;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000177
178 rcu_read_lock();
Sven Eckelmann3193e8f2012-05-12 02:09:42 +0200179 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
Marek Lindnere6c10f42011-02-18 12:33:20 +0000180 if (hard_iface->if_status != IF_ACTIVE)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000181 continue;
182
Marek Lindnere6c10f42011-02-18 12:33:20 +0000183 if (compare_eth(hard_iface->net_dev->dev_addr, addr)) {
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000184 rcu_read_unlock();
185 return 1;
186 }
187 }
188 rcu_read_unlock();
189 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000190}
191
/* default slot of recv_packet_handler[]: drop any packet type no
 * subsystem has registered a handler for
 */
static int recv_unhandled_packet(struct sk_buff *skb,
				 struct hard_iface *recv_if)
{
	return NET_RX_DROP;
}
197
/* incoming packets with the batman ethertype received on any active hard
 * interface
 *
 * Validates the frame (shared-skb copy, minimum length, sane mac header,
 * active interface, compatible protocol version) and then dispatches it
 * through recv_packet_handler[] keyed by the packet_type byte. Always
 * returns NET_RX_SUCCESS for valid frames — even if the handler dropped
 * them — and NET_RX_DROP only when the skb never reached a handler.
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct bat_priv *bat_priv;
	struct batman_ogm_packet *batman_ogm_packet;
	struct hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	/* ptype is embedded in the hard_iface it was registered for */
	hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
	/* get a private copy if the skb is shared, so we may modify it */
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	/* interface not attached to a mesh (soft interface) yet */
	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != IF_ACTIVE)
		goto err_free;

	/* every batman packet type starts with the common header,
	 * which batman_ogm_packet embeds first
	 */
	batman_ogm_packet = (struct batman_ogm_packet *)skb->data;

	if (batman_ogm_packet->header.version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_ogm_packet->header.version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batman_ogm_packet->header.packet_type;
	/* idx is uint8_t, so it always indexes within the 256-entry table */
	ret = (*recv_packet_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}
267
268static void recv_handler_init(void)
269{
270 int i;
271
272 for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++)
273 recv_packet_handler[i] = recv_unhandled_packet;
274
Marek Lindnerffa995e2012-03-01 15:35:17 +0800275 /* batman icmp packet */
Sven Eckelmann30d3c512012-05-12 02:09:36 +0200276 recv_packet_handler[BAT_ICMP] = batadv_recv_icmp_packet;
Marek Lindnerffa995e2012-03-01 15:35:17 +0800277 /* unicast packet */
Sven Eckelmann30d3c512012-05-12 02:09:36 +0200278 recv_packet_handler[BAT_UNICAST] = batadv_recv_unicast_packet;
Marek Lindnerffa995e2012-03-01 15:35:17 +0800279 /* fragmented unicast packet */
Sven Eckelmann30d3c512012-05-12 02:09:36 +0200280 recv_packet_handler[BAT_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
Marek Lindnerffa995e2012-03-01 15:35:17 +0800281 /* broadcast packet */
Sven Eckelmann30d3c512012-05-12 02:09:36 +0200282 recv_packet_handler[BAT_BCAST] = batadv_recv_bcast_packet;
Marek Lindnerffa995e2012-03-01 15:35:17 +0800283 /* vis packet */
Sven Eckelmann30d3c512012-05-12 02:09:36 +0200284 recv_packet_handler[BAT_VIS] = batadv_recv_vis_packet;
Marek Lindnerffa995e2012-03-01 15:35:17 +0800285 /* Translation table query (request or response) */
Sven Eckelmann30d3c512012-05-12 02:09:36 +0200286 recv_packet_handler[BAT_TT_QUERY] = batadv_recv_tt_query;
Marek Lindnerffa995e2012-03-01 15:35:17 +0800287 /* Roaming advertisement */
Sven Eckelmann30d3c512012-05-12 02:09:36 +0200288 recv_packet_handler[BAT_ROAM_ADV] = batadv_recv_roam_adv;
Marek Lindnerffa995e2012-03-01 15:35:17 +0800289}
290
Sven Eckelmann3193e8f2012-05-12 02:09:42 +0200291int batadv_recv_handler_register(uint8_t packet_type,
292 int (*recv_handler)(struct sk_buff *,
293 struct hard_iface *))
Marek Lindnerffa995e2012-03-01 15:35:17 +0800294{
295 if (recv_packet_handler[packet_type] != &recv_unhandled_packet)
296 return -EBUSY;
297
298 recv_packet_handler[packet_type] = recv_handler;
299 return 0;
300}
301
/* reset the slot for @packet_type back to the drop-everything default */
void batadv_recv_handler_unregister(uint8_t packet_type)
{
	recv_packet_handler[packet_type] = recv_unhandled_packet;
}
306
Marek Lindner1c280472011-11-28 17:40:17 +0800307static struct bat_algo_ops *bat_algo_get(char *name)
308{
309 struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
310 struct hlist_node *node;
311
312 hlist_for_each_entry(bat_algo_ops_tmp, node, &bat_algo_list, list) {
313 if (strcmp(bat_algo_ops_tmp->name, name) != 0)
314 continue;
315
316 bat_algo_ops = bat_algo_ops_tmp;
317 break;
318 }
319
320 return bat_algo_ops;
321}
322
Sven Eckelmann3193e8f2012-05-12 02:09:42 +0200323int batadv_algo_register(struct bat_algo_ops *bat_algo_ops)
Marek Lindner1c280472011-11-28 17:40:17 +0800324{
325 struct bat_algo_ops *bat_algo_ops_tmp;
Sven Eckelmann5346c352012-05-05 13:27:28 +0200326 int ret;
Marek Lindner1c280472011-11-28 17:40:17 +0800327
328 bat_algo_ops_tmp = bat_algo_get(bat_algo_ops->name);
329 if (bat_algo_ops_tmp) {
Sven Eckelmann86ceb362012-03-07 09:07:45 +0100330 pr_info("Trying to register already registered routing algorithm: %s\n",
331 bat_algo_ops->name);
Sven Eckelmann5346c352012-05-05 13:27:28 +0200332 ret = -EEXIST;
Marek Lindner1c280472011-11-28 17:40:17 +0800333 goto out;
334 }
335
Marek Lindner01c42242011-11-28 21:31:55 +0800336 /* all algorithms must implement all ops (for now) */
Marek Lindnerc2aca022012-02-07 17:20:45 +0800337 if (!bat_algo_ops->bat_iface_enable ||
Marek Lindner00a50072012-02-07 17:20:47 +0800338 !bat_algo_ops->bat_iface_disable ||
Marek Lindnerc3229392012-03-11 06:17:50 +0800339 !bat_algo_ops->bat_iface_update_mac ||
Marek Lindnercd8b78e2012-02-07 17:20:49 +0800340 !bat_algo_ops->bat_primary_iface_set ||
Marek Lindner01c42242011-11-28 21:31:55 +0800341 !bat_algo_ops->bat_ogm_schedule ||
Marek Lindnerc3e29312012-03-04 16:56:25 +0800342 !bat_algo_ops->bat_ogm_emit) {
Marek Lindner01c42242011-11-28 21:31:55 +0800343 pr_info("Routing algo '%s' does not implement required ops\n",
344 bat_algo_ops->name);
Sven Eckelmann5346c352012-05-05 13:27:28 +0200345 ret = -EINVAL;
Marek Lindner01c42242011-11-28 21:31:55 +0800346 goto out;
347 }
348
Marek Lindner1c280472011-11-28 17:40:17 +0800349 INIT_HLIST_NODE(&bat_algo_ops->list);
350 hlist_add_head(&bat_algo_ops->list, &bat_algo_list);
351 ret = 0;
352
353out:
354 return ret;
355}
356
Sven Eckelmann3193e8f2012-05-12 02:09:42 +0200357int batadv_algo_select(struct bat_priv *bat_priv, char *name)
Marek Lindner1c280472011-11-28 17:40:17 +0800358{
359 struct bat_algo_ops *bat_algo_ops;
Sven Eckelmann5346c352012-05-05 13:27:28 +0200360 int ret = -EINVAL;
Marek Lindner1c280472011-11-28 17:40:17 +0800361
362 bat_algo_ops = bat_algo_get(name);
363 if (!bat_algo_ops)
364 goto out;
365
366 bat_priv->bat_algo_ops = bat_algo_ops;
367 ret = 0;
368
369out:
370 return ret;
371}
372
Sven Eckelmann3193e8f2012-05-12 02:09:42 +0200373int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
Marek Lindner1c280472011-11-28 17:40:17 +0800374{
375 struct bat_algo_ops *bat_algo_ops;
376 struct hlist_node *node;
377
378 seq_printf(seq, "Available routing algorithms:\n");
379
380 hlist_for_each_entry(bat_algo_ops, node, &bat_algo_list, list) {
381 seq_printf(seq, "%s\n", bat_algo_ops->name);
382 }
383
384 return 0;
385}
386
Marek Lindnerd419be12011-12-10 19:45:53 +0800387static int param_set_ra(const char *val, const struct kernel_param *kp)
388{
389 struct bat_algo_ops *bat_algo_ops;
Marek Lindnerd8cb54862012-04-18 17:16:39 +0800390 char *algo_name = (char *)val;
391 size_t name_len = strlen(algo_name);
Marek Lindnerd419be12011-12-10 19:45:53 +0800392
Marek Lindnerd8cb54862012-04-18 17:16:39 +0800393 if (algo_name[name_len - 1] == '\n')
394 algo_name[name_len - 1] = '\0';
395
396 bat_algo_ops = bat_algo_get(algo_name);
Marek Lindnerd419be12011-12-10 19:45:53 +0800397 if (!bat_algo_ops) {
Marek Lindnerd8cb54862012-04-18 17:16:39 +0800398 pr_err("Routing algorithm '%s' is not supported\n", algo_name);
Marek Lindnerd419be12011-12-10 19:45:53 +0800399 return -EINVAL;
400 }
401
Marek Lindnerd8cb54862012-04-18 17:16:39 +0800402 return param_set_copystring(algo_name, kp);
Marek Lindnerd419be12011-12-10 19:45:53 +0800403}
404
/* hook param_set_ra into the module parameter machinery; reads go
 * straight through the standard string getter
 */
static const struct kernel_param_ops param_ops_ra = {
	.set = param_set_ra,
	.get = param_get_string,
};

/* backing storage descriptor for the routing_algo parameter
 * (NOTE(review): the leading double underscore makes this a reserved
 * identifier — consider renaming)
 */
static struct kparam_string __param_string_ra = {
	.maxlen = sizeof(batadv_routing_algo),
	.string = batadv_routing_algo,
};

/* world-readable, root-writable: /sys/module/.../parameters/routing_algo */
module_param_cb(routing_algo, &param_ops_ra, &__param_string_ra, 0644);
module_init(batman_init);
module_exit(batman_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE);
MODULE_VERSION(SOURCE_VERSION);