/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "hard-interface.h"
#include "soft-interface.h"
#include "send.h"
#include "translation-table.h"
#include "routing.h"
#include "bat_sysfs.h"
#include "originator.h"
#include "hash.h"

#include <linux/if_arp.h>

/* protect update critical side of hardif_list - but not the content */
static DEFINE_SPINLOCK(hardif_list_lock);

static int batman_skb_recv(struct sk_buff *skb,
			   struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev);

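/* RCU callback: drop the net_device reference held by a batman_if and
 * free the structure once the grace period has elapsed */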
void hardif_free_rcu(struct rcu_head *rcu)
{
	struct batman_if *batman_if;

	batman_if = container_of(rcu, struct batman_if, rcu);
	dev_put(batman_if->net_dev);
	kfree(batman_if);
}

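/* find the batman_if matching a net_device; returns it with its
 * refcount increased, or NULL when it is not on hardif_list */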
struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
{
	struct batman_if *batman_if;

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &hardif_list, list) {
		if (batman_if->net_dev == net_dev &&
		    atomic_inc_not_zero(&batman_if->refcount))
			goto out;
	}

	batman_if = NULL;

out:
	rcu_read_unlock();
	return batman_if;
}

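/* check whether a net_device can be used as a hard interface: it has
 * to be a non-loopback ethernet device that is not a batman-adv soft
 * interface itself */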
static int is_valid_iface(struct net_device *net_dev)
{
	if (net_dev->flags & IFF_LOOPBACK)
		return 0;

	if (net_dev->type != ARPHRD_ETHER)
		return 0;

	if (net_dev->addr_len != ETH_ALEN)
		return 0;

	/* no batman over batman */
#ifdef HAVE_NET_DEVICE_OPS
	if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
		return 0;
#else
	if (net_dev->hard_start_xmit == interface_tx)
		return 0;
#endif

	/* Device is being bridged */
	/* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
		return 0; */

	return 1;
}

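/* return the first active hard interface belonging to the given soft
 * interface (with its refcount increased) or NULL if there is none */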
static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
{
	struct batman_if *batman_if;

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &hardif_list, list) {
		if (batman_if->soft_iface != soft_iface)
			continue;

		if (batman_if->if_status == IF_ACTIVE &&
		    atomic_inc_not_zero(&batman_if->refcount))
			goto out;
	}

	batman_if = NULL;

out:
	rcu_read_unlock();
	return batman_if;
}

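/* announce the primary interface's MAC address as originator and
 * sender of our own vis packet */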
static void update_primary_addr(struct bat_priv *bat_priv)
{
	struct vis_packet *vis_packet;

	vis_packet = (struct vis_packet *)
				bat_priv->my_vis_info->skb_packet->data;
	memcpy(vis_packet->vis_orig,
	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(vis_packet->sender_orig,
	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
}

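/* replace the current primary interface: the old one is released and
 * the new one gets PRIMARIES_FIRST_HOP and full TTL set in its
 * batman packet buffer */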
static void set_primary_if(struct bat_priv *bat_priv,
			   struct batman_if *batman_if)
{
	struct batman_packet *batman_packet;
	struct batman_if *old_if;

	if (batman_if && !atomic_inc_not_zero(&batman_if->refcount))
		batman_if = NULL;

	old_if = bat_priv->primary_if;
	bat_priv->primary_if = batman_if;

	if (old_if)
		hardif_free_ref(old_if);

	if (!bat_priv->primary_if)
		return;

	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
	batman_packet->flags = PRIMARIES_FIRST_HOP;
	batman_packet->ttl = TTL;

	update_primary_addr(bat_priv);

	/***
	 * hacky trick to make sure that we send the HNA information via
	 * our new primary interface
	 */
	atomic_set(&bat_priv->hna_local_changed, 1);
}

static bool hardif_is_iface_up(struct batman_if *batman_if)
{
	if (batman_if->net_dev->flags & IFF_UP)
		return true;

	return false;
}

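/* copy the hard interface's current MAC address into the originator
 * and prev_sender fields of its batman packet buffer */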
static void update_mac_addresses(struct batman_if *batman_if)
{
	memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
	       batman_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender,
	       batman_if->net_dev->dev_addr, ETH_ALEN);
}

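/* warn if the MAC address of the given net_device is already used by
 * another active hard interface */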
static void check_known_mac_addr(struct net_device *net_dev)
{
	struct batman_if *batman_if;

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &hardif_list, list) {
		if ((batman_if->if_status != IF_ACTIVE) &&
		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
			continue;

		if (batman_if->net_dev == net_dev)
			continue;

		if (!compare_eth(batman_if->net_dev->dev_addr,
				 net_dev->dev_addr))
			continue;

		pr_warning("The newly added mac address (%pM) already exists "
			   "on: %s\n", net_dev->dev_addr,
			   batman_if->net_dev->name);
		pr_warning("It is strongly recommended to keep mac addresses "
			   "unique to avoid problems!\n");
	}
	rcu_read_unlock();
}

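/* compute the biggest MTU the soft interface can use: frames must fit
 * through every active hard interface unless fragmentation is enabled */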
int hardif_min_mtu(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct batman_if *batman_if;
	/* allow big frames if all devices are capable to do so
	 * (have MTU > 1500 + BAT_HEADER_LEN) */
	int min_mtu = ETH_DATA_LEN;

	if (atomic_read(&bat_priv->fragmentation))
		goto out;

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &hardif_list, list) {
		if ((batman_if->if_status != IF_ACTIVE) &&
		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
			continue;

		if (batman_if->soft_iface != soft_iface)
			continue;

		min_mtu = min_t(int, batman_if->net_dev->mtu - BAT_HEADER_LEN,
				min_mtu);
	}
	rcu_read_unlock();
out:
	return min_mtu;
}

/* adjusts the MTU if a new interface with a smaller MTU appeared. */
void update_min_mtu(struct net_device *soft_iface)
{
	int min_mtu;

	min_mtu = hardif_min_mtu(soft_iface);
	if (soft_iface->mtu != min_mtu)
		soft_iface->mtu = min_mtu;
}

static void hardif_activate_interface(struct batman_if *batman_if)
{
	struct bat_priv *bat_priv;

	if (batman_if->if_status != IF_INACTIVE)
		return;

	bat_priv = netdev_priv(batman_if->soft_iface);

	update_mac_addresses(batman_if);
	batman_if->if_status = IF_TO_BE_ACTIVATED;

	/**
	 * the first active interface becomes our primary interface or
	 * the next active interface after the old primary interface
	 * was removed
	 */
	if (!bat_priv->primary_if)
		set_primary_if(bat_priv, batman_if);

	bat_info(batman_if->soft_iface, "Interface activated: %s\n",
		 batman_if->net_dev->name);

	update_min_mtu(batman_if->soft_iface);
}

static void hardif_deactivate_interface(struct batman_if *batman_if)
{
	if ((batman_if->if_status != IF_ACTIVE) &&
	    (batman_if->if_status != IF_TO_BE_ACTIVATED))
		return;

	batman_if->if_status = IF_INACTIVE;

	bat_info(batman_if->soft_iface, "Interface deactivated: %s\n",
		 batman_if->net_dev->name);

	update_min_mtu(batman_if->soft_iface);
}

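/* attach a hard interface to the soft interface named by iface_name
 * (creating the soft interface if needed) and start broadcasting
 * originator messages on it */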
int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
{
	struct bat_priv *bat_priv;
	struct batman_packet *batman_packet;

	if (batman_if->if_status != IF_NOT_IN_USE)
		goto out;

	if (!atomic_inc_not_zero(&batman_if->refcount))
		goto out;

	batman_if->soft_iface = dev_get_by_name(&init_net, iface_name);

	if (!batman_if->soft_iface) {
		batman_if->soft_iface = softif_create(iface_name);

		if (!batman_if->soft_iface)
			goto err;

		/* dev_get_by_name() increases the reference counter for us */
		dev_hold(batman_if->soft_iface);
	}

	bat_priv = netdev_priv(batman_if->soft_iface);
	batman_if->packet_len = BAT_PACKET_LEN;
	batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);

	if (!batman_if->packet_buff) {
		bat_err(batman_if->soft_iface, "Can't add interface packet "
			"(%s): out of memory\n", batman_if->net_dev->name);
		goto err;
	}

	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
	batman_packet->packet_type = BAT_PACKET;
	batman_packet->version = COMPAT_VERSION;
	batman_packet->flags = 0;
	batman_packet->ttl = 2;
	batman_packet->tq = TQ_MAX_VALUE;
	batman_packet->num_hna = 0;

	batman_if->if_num = bat_priv->num_ifaces;
	bat_priv->num_ifaces++;
	batman_if->if_status = IF_INACTIVE;
	orig_hash_add_if(batman_if, bat_priv->num_ifaces);

	batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
	batman_if->batman_adv_ptype.func = batman_skb_recv;
	batman_if->batman_adv_ptype.dev = batman_if->net_dev;
	dev_add_pack(&batman_if->batman_adv_ptype);

	atomic_set(&batman_if->seqno, 1);
	atomic_set(&batman_if->frag_seqno, 1);
	bat_info(batman_if->soft_iface, "Adding interface: %s\n",
		 batman_if->net_dev->name);

	if (atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
		ETH_DATA_LEN + BAT_HEADER_LEN)
		bat_info(batman_if->soft_iface,
			 "The MTU of interface %s is too small (%i) to handle "
			 "the transport of batman-adv packets. Packets going "
			 "over this interface will be fragmented on layer2 "
			 "which could impact the performance. Setting the MTU "
			 "to %zi would solve the problem.\n",
			 batman_if->net_dev->name, batman_if->net_dev->mtu,
			 ETH_DATA_LEN + BAT_HEADER_LEN);

	if (!atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
		ETH_DATA_LEN + BAT_HEADER_LEN)
		bat_info(batman_if->soft_iface,
			 "The MTU of interface %s is too small (%i) to handle "
			 "the transport of batman-adv packets. If you "
			 "experience problems getting traffic through try "
			 "increasing the MTU to %zi.\n",
			 batman_if->net_dev->name, batman_if->net_dev->mtu,
			 ETH_DATA_LEN + BAT_HEADER_LEN);

	if (hardif_is_iface_up(batman_if))
		hardif_activate_interface(batman_if);
	else
		bat_err(batman_if->soft_iface, "Not using interface %s "
			"(retrying later): interface not active\n",
			batman_if->net_dev->name);

	/* begin scheduling originator messages on that interface */
	schedule_own_packet(batman_if);

out:
	return 0;

err:
	hardif_free_ref(batman_if);
	return -ENOMEM;
}

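/* detach a hard interface from its soft interface, electing a new
 * primary interface if necessary; the soft interface is destroyed
 * once its last hard interface is gone */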
void hardif_disable_interface(struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);

	if (batman_if->if_status == IF_ACTIVE)
		hardif_deactivate_interface(batman_if);

	if (batman_if->if_status != IF_INACTIVE)
		return;

	bat_info(batman_if->soft_iface, "Removing interface: %s\n",
		 batman_if->net_dev->name);
	dev_remove_pack(&batman_if->batman_adv_ptype);

	bat_priv->num_ifaces--;
	orig_hash_del_if(batman_if, bat_priv->num_ifaces);

	if (batman_if == bat_priv->primary_if) {
		struct batman_if *new_if;

		new_if = get_active_batman_if(batman_if->soft_iface);
		set_primary_if(bat_priv, new_if);

		if (new_if)
			hardif_free_ref(new_if);
	}

	kfree(batman_if->packet_buff);
	batman_if->packet_buff = NULL;
	batman_if->if_status = IF_NOT_IN_USE;

	/* delete all references to this batman_if */
	purge_orig_ref(bat_priv);
	purge_outstanding_packets(bat_priv, batman_if);
	dev_put(batman_if->soft_iface);

	/* nobody uses this interface anymore */
	if (!bat_priv->num_ifaces)
		softif_destroy(batman_if->soft_iface);

	batman_if->soft_iface = NULL;
	hardif_free_ref(batman_if);
}

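/* allocate and initialize a batman_if for a newly appeared net_device
 * and add it to hardif_list; returns it with an extra reference for
 * the caller or NULL on failure */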
static struct batman_if *hardif_add_interface(struct net_device *net_dev)
{
	struct batman_if *batman_if;
	int ret;

	ret = is_valid_iface(net_dev);
	if (ret != 1)
		goto out;

	dev_hold(net_dev);

	batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
	if (!batman_if) {
		pr_err("Can't add interface (%s): out of memory\n",
		       net_dev->name);
		goto release_dev;
	}

	ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev);
	if (ret)
		goto free_if;

	batman_if->if_num = -1;
	batman_if->net_dev = net_dev;
	batman_if->soft_iface = NULL;
	batman_if->if_status = IF_NOT_IN_USE;
	INIT_LIST_HEAD(&batman_if->list);
	/* extra reference for return */
	atomic_set(&batman_if->refcount, 2);

	check_known_mac_addr(batman_if->net_dev);

	spin_lock(&hardif_list_lock);
	list_add_tail_rcu(&batman_if->list, &hardif_list);
	spin_unlock(&hardif_list_lock);

	return batman_if;

free_if:
	kfree(batman_if);
release_dev:
	dev_put(net_dev);
out:
	return NULL;
}

static void hardif_remove_interface(struct batman_if *batman_if)
{
	/* first deactivate interface */
	if (batman_if->if_status != IF_NOT_IN_USE)
		hardif_disable_interface(batman_if);

	if (batman_if->if_status != IF_NOT_IN_USE)
		return;

	batman_if->if_status = IF_TO_BE_REMOVED;
	sysfs_del_hardif(&batman_if->hardif_obj);
	hardif_free_ref(batman_if);
}

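/* remove all hard interfaces, e.g. when the module is unloaded */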
void hardif_remove_interfaces(void)
{
	struct batman_if *batman_if, *batman_if_tmp;
	struct list_head if_queue;

	INIT_LIST_HEAD(&if_queue);

	spin_lock(&hardif_list_lock);
	list_for_each_entry_safe(batman_if, batman_if_tmp, &hardif_list, list) {
		list_del_rcu(&batman_if->list);
		list_add_tail(&batman_if->list, &if_queue);
	}
	spin_unlock(&hardif_list_lock);

	rtnl_lock();
	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) {
		hardif_remove_interface(batman_if);
	}
	rtnl_unlock();
}

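/* netdevice notifier callback: keeps hardif_list and the interface
 * status in sync with net_device register/unregister, up/down, MTU
 * and MAC address events */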
static int hard_if_event(struct notifier_block *this,
			 unsigned long event, void *ptr)
{
	struct net_device *net_dev = (struct net_device *)ptr;
	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
	struct bat_priv *bat_priv;

	if (!batman_if && event == NETDEV_REGISTER)
		batman_if = hardif_add_interface(net_dev);

	if (!batman_if)
		goto out;

	switch (event) {
	case NETDEV_UP:
		hardif_activate_interface(batman_if);
		break;
	case NETDEV_GOING_DOWN:
	case NETDEV_DOWN:
		hardif_deactivate_interface(batman_if);
		break;
	case NETDEV_UNREGISTER:
		spin_lock(&hardif_list_lock);
		list_del_rcu(&batman_if->list);
		spin_unlock(&hardif_list_lock);

		hardif_remove_interface(batman_if);
		break;
	case NETDEV_CHANGEMTU:
		if (batman_if->soft_iface)
			update_min_mtu(batman_if->soft_iface);
		break;
	case NETDEV_CHANGEADDR:
		if (batman_if->if_status == IF_NOT_IN_USE)
			goto hardif_put;

		check_known_mac_addr(batman_if->net_dev);
		update_mac_addresses(batman_if);

		bat_priv = netdev_priv(batman_if->soft_iface);
		if (batman_if == bat_priv->primary_if)
			update_primary_addr(bat_priv);
		break;
	default:
		break;
	}

hardif_put:
	hardif_free_ref(batman_if);
out:
	return NOTIFY_DONE;
}

/* receive a packet with the batman ethertype coming on a hard
 * interface */
static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct bat_priv *bat_priv;
	struct batman_packet *batman_packet;
	struct batman_if *batman_if;
	int ret;

	batman_if = container_of(ptype, struct batman_if, batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != sizeof(struct ethhdr) ||
		     !skb_mac_header(skb)))
		goto err_free;

	if (!batman_if->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(batman_if->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (batman_if->if_status != IF_ACTIVE)
		goto err_free;

	batman_packet = (struct batman_packet *)skb->data;

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb. */

	switch (batman_packet->packet_type) {
		/* batman originator packet */
	case BAT_PACKET:
		ret = recv_bat_packet(skb, batman_if);
		break;

		/* batman icmp packet */
	case BAT_ICMP:
		ret = recv_icmp_packet(skb, batman_if);
		break;

		/* unicast packet */
	case BAT_UNICAST:
		ret = recv_unicast_packet(skb, batman_if);
		break;

		/* fragmented unicast packet */
	case BAT_UNICAST_FRAG:
		ret = recv_ucast_frag_packet(skb, batman_if);
		break;

		/* broadcast packet */
	case BAT_BCAST:
		ret = recv_bcast_packet(skb, batman_if);
		break;

		/* vis packet */
	case BAT_VIS:
		ret = recv_vis_packet(skb, batman_if);
		break;
	default:
		ret = NET_RX_DROP;
	}

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons. */

	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}

struct notifier_block hard_if_notifier = {
	.notifier_call = hard_if_event,
};