/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "translation-table.h"
#include "ring_buffer.h"
#include "originator.h"
#include "routing.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "send.h"
#include "bat_algo.h"

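/* set up the OGM buffer of a newly enabled interface: pick a random
 * initial sequence number and pre-fill the static OGM header fields */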
static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface)
{
	struct batman_ogm_packet *batman_ogm_packet;
	uint32_t random_seqno;
	int res = -1;

	/* randomize initial seqno to avoid collision */
	get_random_bytes(&random_seqno, sizeof(random_seqno));
	atomic_set(&hard_iface->seqno, random_seqno);

	hard_iface->packet_len = BATMAN_OGM_HLEN;
	hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);

	if (!hard_iface->packet_buff)
		goto out;

	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
	batman_ogm_packet->header.packet_type = BAT_IV_OGM;
	batman_ogm_packet->header.version = COMPAT_VERSION;
	batman_ogm_packet->header.ttl = 2;
	batman_ogm_packet->flags = NO_FLAGS;
	batman_ogm_packet->tq = TQ_MAX_VALUE;
	batman_ogm_packet->tt_num_changes = 0;
	batman_ogm_packet->ttvn = 0;

	res = 0;

out:
	return res;
}

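/* free the OGM buffer again when the interface is disabled */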
static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface)
{
	kfree(hard_iface->packet_buff);
	hard_iface->packet_buff = NULL;
}

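/* mark OGMs sent on the primary interface: set the PRIMARIES_FIRST_HOP
 * flag and start with the full TTL */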
static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface)
{
	struct batman_ogm_packet *batman_ogm_packet;

	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
	batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
	batman_ogm_packet->header.ttl = TTL;
}

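/* copy the current interface MAC address into the originator and
 * prev_sender fields of the pre-built OGM */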
static void bat_iv_ogm_update_mac(struct hard_iface *hard_iface)
{
	struct batman_ogm_packet *batman_ogm_packet;

	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
	memcpy(batman_ogm_packet->orig,
	       hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(batman_ogm_packet->prev_sender,
	       hard_iface->net_dev->dev_addr, ETH_ALEN);
}

/* when do we schedule our own ogm to be sent */
static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv)
{
	return jiffies + msecs_to_jiffies(
		   atomic_read(&bat_priv->orig_interval) -
		   JITTER + (random32() % 2*JITTER));
}

/* when do we schedule an ogm packet to be sent */
static unsigned long bat_iv_ogm_fwd_send_time(void)
{
	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}

/* apply hop penalty for a normal link */
static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
{
	int hop_penalty = atomic_read(&bat_priv->hop_penalty);
	return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
}

/* is there another aggregated packet here? */
static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len,
				  int tt_num_changes)
{
	int next_buff_pos = buff_pos + BATMAN_OGM_HLEN + tt_len(tt_num_changes);

	return (next_buff_pos <= packet_len) &&
		(next_buff_pos <= MAX_AGGREGATION_BYTES);
}

/* send a batman ogm to a given interface */
static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet,
				  struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_ogm_packet *batman_ogm_packet;
	struct sk_buff *skb;

	if (hard_iface->if_status != IF_ACTIVE)
		return;

	packet_num = 0;
	buff_pos = 0;
	batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;

	/* adjust all flags and log packets */
	while (bat_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
				      batman_ogm_packet->tt_num_changes)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == hard_iface))
			batman_ogm_packet->flags |= DIRECTLINK;
		else
			batman_ogm_packet->flags &= ~DIRECTLINK;

		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
							    "Sending own" :
							    "Forwarding"));
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
			fwd_str, (packet_num > 0 ? "aggregated " : ""),
			batman_ogm_packet->orig,
			ntohl(batman_ogm_packet->seqno),
			batman_ogm_packet->tq, batman_ogm_packet->header.ttl,
			(batman_ogm_packet->flags & DIRECTLINK ?
			 "on" : "off"),
			batman_ogm_packet->ttvn, hard_iface->net_dev->name,
			hard_iface->net_dev->dev_addr);

		buff_pos += BATMAN_OGM_HLEN +
			    tt_len(batman_ogm_packet->tt_num_changes);
		packet_num++;
		batman_ogm_packet = (struct batman_ogm_packet *)
			(forw_packet->skb->data + buff_pos);
	}

	/* create clone because function is called more than once */
	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
	if (skb)
		send_skb_packet(skb, hard_iface, broadcast_addr);
}

/* send a batman ogm packet */
static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
{
	struct hard_iface *hard_iface;
	struct net_device *soft_iface;
	struct bat_priv *bat_priv;
	struct hard_iface *primary_if = NULL;
	struct batman_ogm_packet *batman_ogm_packet;
	unsigned char directlink;

	batman_ogm_packet = (struct batman_ogm_packet *)
		(forw_packet->skb->data);
	directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		pr_err("Error - can't forward packet: incoming iface not specified\n");
		goto out;
	}

	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* multihomed peer assumed */
	/* non-primary OGMs are only broadcasted on their interface */
	if ((directlink && (batman_ogm_packet->header.ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming != primary_if))) {

		/* FIXME: what about aggregated packets ? */
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			batman_ogm_packet->orig,
			ntohl(batman_ogm_packet->seqno),
			batman_ogm_packet->header.ttl,
			forw_packet->if_incoming->net_dev->name,
			forw_packet->if_incoming->net_dev->dev_addr);

		/* skb is only used once and then forw_packet is freed */
		send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
				broadcast_addr);
		forw_packet->skb = NULL;

		goto out;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		bat_iv_ogm_send_to_if(forw_packet, hard_iface);
	}
	rcu_read_unlock();

out:
	if (primary_if)
		hardif_free_ref(primary_if);
}

/* return true if new_packet can be aggregated with forw_packet */
static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
					*new_batman_ogm_packet,
				     struct bat_priv *bat_priv,
				     int packet_len, unsigned long send_time,
				     bool directlink,
				     const struct hard_iface *if_incoming,
				     const struct forw_packet *forw_packet)
{
	struct batman_ogm_packet *batman_ogm_packet;
	int aggregated_bytes = forw_packet->packet_len + packet_len;
	struct hard_iface *primary_if = NULL;
	bool res = false;

	batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;

	/**
	 * we can aggregate the current packet to this aggregated packet
	 * if:
	 *
	 * - the send time is within our MAX_AGGREGATION_MS time
	 * - the resulting packet won't be bigger than
	 *   MAX_AGGREGATION_BYTES
	 */

	if (time_before(send_time, forw_packet->send_time) &&
	    time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS),
			  forw_packet->send_time) &&
	    (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {

		/**
		 * check aggregation compatibility
		 * -> direct link packets are broadcasted on
		 *    their interface only
		 * -> aggregate packet if the current packet is
		 *    a "global" packet as well as the base
		 *    packet
		 */

		primary_if = primary_if_get_selected(bat_priv);
		if (!primary_if)
			goto out;

		/* packets without direct link flag and high TTL
		 * are flooded through the net */
		if ((!directlink) &&
		    (!(batman_ogm_packet->flags & DIRECTLINK)) &&
		    (batman_ogm_packet->header.ttl != 1) &&

		    /* own packets originating non-primary
		     * interfaces leave only that interface */
		    ((!forw_packet->own) ||
		     (forw_packet->if_incoming == primary_if))) {
			res = true;
			goto out;
		}

		/* if the incoming packet is sent via this one
		 * interface only - we still can aggregate */
		if ((directlink) &&
		    (new_batman_ogm_packet->header.ttl == 1) &&
		    (forw_packet->if_incoming == if_incoming) &&

		    /* packets from direct neighbors or
		     * own secondary interface packets
		     * (= secondary interface packets in general) */
		    (batman_ogm_packet->flags & DIRECTLINK ||
		     (forw_packet->own &&
		      forw_packet->if_incoming != primary_if))) {
			res = true;
			goto out;
		}
	}

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return res;
}

/* create a new aggregated packet and add this packet to it */
static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
				     int packet_len, unsigned long send_time,
				     bool direct_link,
				     struct hard_iface *if_incoming,
				     int own_packet)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct forw_packet *forw_packet_aggr;
	unsigned char *skb_buff;

	if (!atomic_inc_not_zero(&if_incoming->refcount))
		return;

	/* own packet should always be scheduled */
	if (!own_packet) {
		if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
			bat_dbg(DBG_BATMAN, bat_priv,
				"batman packet queue full\n");
			goto out;
		}
	}

	forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
	if (!forw_packet_aggr) {
		if (!own_packet)
			atomic_inc(&bat_priv->batman_queue_left);
		goto out;
	}

	if ((atomic_read(&bat_priv->aggregated_ogms)) &&
	    (packet_len < MAX_AGGREGATION_BYTES))
		forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
						      ETH_HLEN);
	else
		forw_packet_aggr->skb = dev_alloc_skb(packet_len + ETH_HLEN);

	if (!forw_packet_aggr->skb) {
		if (!own_packet)
			atomic_inc(&bat_priv->batman_queue_left);
		kfree(forw_packet_aggr);
		goto out;
	}
	skb_reserve(forw_packet_aggr->skb, ETH_HLEN);

	INIT_HLIST_NODE(&forw_packet_aggr->list);

	skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
	forw_packet_aggr->packet_len = packet_len;
	memcpy(skb_buff, packet_buff, packet_len);

	forw_packet_aggr->own = own_packet;
	forw_packet_aggr->if_incoming = if_incoming;
	forw_packet_aggr->num_packets = 0;
	forw_packet_aggr->direct_link_flags = NO_FLAGS;
	forw_packet_aggr->send_time = send_time;

	/* save packet direct link flag status */
	if (direct_link)
		forw_packet_aggr->direct_link_flags |= 1;

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
			  send_outstanding_bat_ogm_packet);
	queue_delayed_work(bat_event_workqueue,
			   &forw_packet_aggr->delayed_work,
			   send_time - jiffies);

	return;
out:
	hardif_free_ref(if_incoming);
}

/* aggregate a new packet into the existing ogm packet */
static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr,
				 const unsigned char *packet_buff,
				 int packet_len, bool direct_link)
{
	unsigned char *skb_buff;

	skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
	memcpy(skb_buff, packet_buff, packet_len);
	forw_packet_aggr->packet_len += packet_len;
	forw_packet_aggr->num_packets++;

	/* save packet direct link flag status */
	if (direct_link)
		forw_packet_aggr->direct_link_flags |=
			(1 << forw_packet_aggr->num_packets);
}

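/* queue an own or forwarded OGM for transmission, aggregating it with an
 * already queued packet whenever possible */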
static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
				 unsigned char *packet_buff,
				 int packet_len, struct hard_iface *if_incoming,
				 int own_packet, unsigned long send_time)
{
	/**
	 * _aggr -> pointer to the packet we want to aggregate with
	 * _pos -> pointer to the position in the queue
	 */
	struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL;
	struct hlist_node *tmp_node;
	struct batman_ogm_packet *batman_ogm_packet;
	bool direct_link;

	batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
	direct_link = batman_ogm_packet->flags & DIRECTLINK ? 1 : 0;

	/* find position for the packet in the forward queue */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	/* own packets are not to be aggregated */
	if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
		hlist_for_each_entry(forw_packet_pos, tmp_node,
				     &bat_priv->forw_bat_list, list) {
			if (bat_iv_ogm_can_aggregate(batman_ogm_packet,
						     bat_priv, packet_len,
						     send_time, direct_link,
						     if_incoming,
						     forw_packet_pos)) {
				forw_packet_aggr = forw_packet_pos;
				break;
			}
		}
	}

	/* nothing to aggregate with - either aggregation disabled or no
	 * suitable aggregation packet found */
	if (!forw_packet_aggr) {
		/* the following section can run without the lock */
		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/**
		 * if we could not aggregate this packet with one of the others
		 * we hold it back for a while, so that it might be aggregated
		 * later on
		 */
		if ((!own_packet) &&
		    (atomic_read(&bat_priv->aggregated_ogms)))
			send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);

		bat_iv_ogm_aggregate_new(packet_buff, packet_len,
					 send_time, direct_link,
					 if_incoming, own_packet);
	} else {
		bat_iv_ogm_aggregate(forw_packet_aggr, packet_buff,
				     packet_len, direct_link);
		spin_unlock_bh(&bat_priv->forw_bat_list_lock);
	}
}

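/* prepare a received OGM for rebroadcast: decrease the TTL, apply the hop
 * penalty and queue the packet on the incoming interface */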
static void bat_iv_ogm_forward(struct orig_node *orig_node,
			       const struct ethhdr *ethhdr,
			       struct batman_ogm_packet *batman_ogm_packet,
			       bool is_single_hop_neigh,
			       struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *router;
	uint8_t in_tq, in_ttl, tq_avg = 0;
	uint8_t tt_num_changes;

	if (batman_ogm_packet->header.ttl <= 1) {
		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
		return;
	}

	router = orig_node_get_router(orig_node);

	in_tq = batman_ogm_packet->tq;
	in_ttl = batman_ogm_packet->header.ttl;
	tt_num_changes = batman_ogm_packet->tt_num_changes;

	batman_ogm_packet->header.ttl--;
	memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
	 * of our best tq value */
	if (router && router->tq_avg != 0) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_eth(router->addr, ethhdr->h_source)) {
			batman_ogm_packet->tq = router->tq_avg;

			if (router->last_ttl)
				batman_ogm_packet->header.ttl =
					router->last_ttl - 1;
		}

		tq_avg = router->tq_avg;
	}

	if (router)
		neigh_node_free_ref(router);

	/* apply hop penalty */
	batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
		in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1,
		batman_ogm_packet->header.ttl);

	batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
	batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);

	/* switch off the primaries first hop flag when forwarding */
	batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP;
	if (is_single_hop_neigh)
		batman_ogm_packet->flags |= DIRECTLINK;
	else
		batman_ogm_packet->flags &= ~DIRECTLINK;

	bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
			     BATMAN_OGM_HLEN + tt_len(tt_num_changes),
			     if_incoming, 0, bat_iv_ogm_fwd_send_time());
}

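/* fill in the dynamic fields of our own OGM (seqno, ttvn, tt_crc, vis and
 * gateway flags) and queue it for transmission */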
static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
				int tt_num_changes)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batman_ogm_packet *batman_ogm_packet;
	struct hard_iface *primary_if;
	int vis_server;

	vis_server = atomic_read(&bat_priv->vis_mode);
	primary_if = primary_if_get_selected(bat_priv);

	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;

	/* change sequence number to network order */
	batman_ogm_packet->seqno =
		htonl((uint32_t)atomic_read(&hard_iface->seqno));

	batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
	batman_ogm_packet->tt_crc = htons((uint16_t)
					  atomic_read(&bat_priv->tt_crc));
	if (tt_num_changes >= 0)
		batman_ogm_packet->tt_num_changes = tt_num_changes;

	if (vis_server == VIS_TYPE_SERVER_SYNC)
		batman_ogm_packet->flags |= VIS_SERVER;
	else
		batman_ogm_packet->flags &= ~VIS_SERVER;

	if ((hard_iface == primary_if) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
		batman_ogm_packet->gw_flags =
			(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
	else
		batman_ogm_packet->gw_flags = NO_FLAGS;

	atomic_inc(&hard_iface->seqno);

	slide_own_bcast_window(hard_iface);
	bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
			     hard_iface->packet_len, hard_iface, 1,
			     bat_iv_ogm_emit_send_time(bat_priv));

	if (primary_if)
		hardif_free_ref(primary_if);
}

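/* update the neighbor ranking and, if necessary, the route towards the
 * originator based on a newly received OGM */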
static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
				   struct orig_node *orig_node,
				   const struct ethhdr *ethhdr,
				   const struct batman_ogm_packet
					*batman_ogm_packet,
				   struct hard_iface *if_incoming,
				   const unsigned char *tt_buff,
				   int is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct neigh_node *router = NULL;
	struct orig_node *orig_node_tmp;
	struct hlist_node *node;
	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;

	bat_dbg(DBG_BATMAN, bat_priv,
		"update_originator(): Searching and updating originator entry of received packet\n");

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming) &&
		     atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
			if (neigh_node)
				neigh_node_free_ref(neigh_node);
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		spin_lock_bh(&tmp_neigh_node->tq_lock);
		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
		spin_unlock_bh(&tmp_neigh_node->tq_lock);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = create_neighbor(orig_node, orig_tmp,
					     ethhdr->h_source, if_incoming);

		orig_node_free_ref(orig_tmp);
		if (!neigh_node)
			goto unlock;
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	rcu_read_unlock();

	orig_node->flags = batman_ogm_packet->flags;
	neigh_node->last_valid = jiffies;

	spin_lock_bh(&neigh_node->tq_lock);
	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_ogm_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
	spin_unlock_bh(&neigh_node->tq_lock);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_ogm_packet->header.ttl;
		neigh_node->last_ttl = batman_ogm_packet->header.ttl;
	}

	bonding_candidate_add(orig_node, neigh_node);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	router = orig_node_get_router(orig_node);
	if (router == neigh_node)
		goto update_tt;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if (router && (router->tq_avg > neigh_node->tq_avg))
		goto update_tt;

	/* if the TQ is the same and the link not more symmetric we
	 * won't consider it either */
	if (router && (neigh_node->tq_avg == router->tq_avg)) {
		orig_node_tmp = router->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_orig =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		orig_node_tmp = neigh_node->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_neigh =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
			goto update_tt;
	}

	update_route(bat_priv, orig_node, neigh_node);

update_tt:
	/* I have to check for transtable changes only if the OGM has been
	 * sent through a primary interface */
	if (((batman_ogm_packet->orig != ethhdr->h_source) &&
	     (batman_ogm_packet->header.ttl > 2)) ||
	    (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
		tt_update_orig(bat_priv, orig_node, tt_buff,
			       batman_ogm_packet->tt_num_changes,
			       batman_ogm_packet->ttvn,
			       batman_ogm_packet->tt_crc);

	if (orig_node->gw_flags != batman_ogm_packet->gw_flags)
		gw_node_update(bat_priv, orig_node,
			       batman_ogm_packet->gw_flags);

	orig_node->gw_flags = batman_ogm_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (router)
		neigh_node_free_ref(router);
}

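/* calculate the transmit quality towards the one hop neighbor and return
 * whether the link is considered bidirectional */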
static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
			      struct orig_node *orig_neigh_node,
			      struct batman_ogm_packet *batman_ogm_packet,
			      struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
	struct hlist_node *node;
	uint8_t total_count;
	uint8_t orig_eq_count, neigh_rq_count, tq_own;
	int tq_asym_penalty, ret = 0;

	/* find corresponding one hop neighbor */
	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_neigh_node->neigh_list, list) {

		if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
			continue;

		if (tmp_neigh_node->if_incoming != if_incoming)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		neigh_node = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	if (!neigh_node)
		neigh_node = create_neighbor(orig_neigh_node,
					     orig_neigh_node,
					     orig_neigh_node->orig,
					     if_incoming);

	if (!neigh_node)
		goto out;

	/* if orig_node is direct neighbor update neigh_node last_valid */
	if (orig_node == orig_neigh_node)
		neigh_node->last_valid = jiffies;

	orig_node->last_valid = jiffies;

	/* find packet count of corresponding one hop neighbor */
	spin_lock_bh(&orig_node->ogm_cnt_lock);
	orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
	neigh_rq_count = neigh_node->real_packet_count;
	spin_unlock_bh(&orig_node->ogm_cnt_lock);

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_eq_count > neigh_rq_count ?
		       neigh_rq_count : orig_eq_count);

	/* if we have too few packets (too little data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;

	/* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
	 * affect the nearly-symmetric links only a little, but
	 * punishes asymmetric links more. This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
				(TQ_LOCAL_WINDOW_SIZE *
				 TQ_LOCAL_WINDOW_SIZE *
				 TQ_LOCAL_WINDOW_SIZE);

	batman_ogm_packet->tq = ((batman_ogm_packet->tq * tq_own
				  * tq_asym_penalty) /
				 (TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_ogm_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	return ret;
}

/* processes a batman packet for all interfaces, adjusts the sequence number and
 * finds out whether it is a duplicate.
 * returns:
 *   1 the packet is a duplicate
 *   0 the packet has not yet been received
 *  -1 the packet is old and has been received while the seqno window
 *     was protected. Caller should drop it.
 */
static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
				    const struct batman_ogm_packet
					*batman_ogm_packet,
				    const struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	struct hlist_node *node;
	int is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark, ret = -1;

	orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
	if (!orig_node)
		return 0;

	spin_lock_bh(&orig_node->ogm_cnt_lock);
	seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno;

	/* signalize caller that the packet is to be dropped. */
	if (!hlist_empty(&orig_node->neigh_list) &&
	    window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits,
					     orig_node->last_real_seqno,
					     batman_ogm_packet->seqno);

		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);

		tmp_neigh_node->real_packet_count =
			bitmap_weight(tmp_neigh_node->real_bits,
				      TQ_LOCAL_WINDOW_SIZE);
	}
	rcu_read_unlock();

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %u, new %u\n",
			orig_node->last_real_seqno, batman_ogm_packet->seqno);
		orig_node->last_real_seqno = batman_ogm_packet->seqno;
	}

	ret = is_duplicate;

out:
	spin_unlock_bh(&orig_node->ogm_cnt_lock);
	orig_node_free_ref(orig_node);
	return ret;
}

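/* run the sanity and drop checks on a single OGM of an aggregate, update the
 * originator ranking and rebroadcast the OGM when appropriate */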
static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
			       struct batman_ogm_packet *batman_ogm_packet,
			       const unsigned char *tt_buff,
			       struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct hard_iface *hard_iface;
	struct orig_node *orig_neigh_node, *orig_node;
	struct neigh_node *router = NULL, *router_router = NULL;
	struct neigh_node *orig_neigh_router = NULL;
	int has_directlink_flag;
	int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
	int is_broadcast = 0, is_bidirectional;
	bool is_single_hop_neigh = false;
	int is_duplicate;
	uint32_t if_incoming_seqno;

	/* Silently drop when the batman packet is actually not a
	 * correct packet.
	 *
	 * This might happen if a packet is padded (e.g. Ethernet has a
	 * minimum frame length of 64 byte) and the aggregation interprets
	 * it as an additional length.
	 *
	 * TODO: A more sane solution would be to have a bit in the
	 * batman_ogm_packet to detect whether the packet is the last
	 * packet in an aggregation. Here we expect that the padding
	 * is always zero (or not 0x01)
	 */
	if (batman_ogm_packet->header.packet_type != BAT_IV_OGM)
		return;

	/* could be changed by schedule_own_packet() */
	if_incoming_seqno = atomic_read(&if_incoming->seqno);

	has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);

	if (compare_eth(ethhdr->h_source, batman_ogm_packet->orig))
		is_single_hop_neigh = true;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
		ethhdr->h_source, if_incoming->net_dev->name,
		if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
		batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
		batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc,
		batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq,
		batman_ogm_packet->header.ttl,
		batman_ogm_packet->header.version, has_directlink_flag);

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->if_status != IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != if_incoming->soft_iface)
			continue;

		if (compare_eth(ethhdr->h_source,
				hard_iface->net_dev->dev_addr))
			is_my_addr = 1;

		if (compare_eth(batman_ogm_packet->orig,
				hard_iface->net_dev->dev_addr))
			is_my_orig = 1;

		if (compare_eth(batman_ogm_packet->prev_sender,
				hard_iface->net_dev->dev_addr))
			is_my_oldorig = 1;

		if (is_broadcast_ether_addr(ethhdr->h_source))
			is_broadcast = 1;
	}
	rcu_read_unlock();

	if (batman_ogm_packet->header.version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_ogm_packet->header.version);
		return;
	}

	if (is_my_addr) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: received my own broadcast (sender: %pM)\n",
			ethhdr->h_source);
		return;
	}

	if (is_broadcast) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n",
			ethhdr->h_source);
		return;
	}

	if (is_my_orig) {
		unsigned long *word;
		int offset;

		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_neigh_node)
			return;

		/* neighbor has to indicate direct link and it has to
		 * come via the corresponding interface */
		/* save packet seqno for bidirectional check */
		if (has_directlink_flag &&
		    compare_eth(if_incoming->net_dev->dev_addr,
				batman_ogm_packet->orig)) {
			offset = if_incoming->if_num * NUM_WORDS;

			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
			word = &(orig_neigh_node->bcast_own[offset]);
			bat_set_bit(word,
				    if_incoming_seqno -
					batman_ogm_packet->seqno - 2);
			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
				bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
		}

		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet from myself (via neighbor)\n");
		orig_node_free_ref(orig_neigh_node);
		return;
	}

	if (is_my_oldorig) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast echos (sender: %pM)\n",
			ethhdr->h_source);
		return;
	}

	orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
	if (!orig_node)
		return;

	is_duplicate = bat_iv_ogm_update_seqnos(ethhdr, batman_ogm_packet,
						if_incoming);

	if (is_duplicate == -1) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: packet within seqno protection time (sender: %pM)\n",
			ethhdr->h_source);
		goto out;
	}

	if (batman_ogm_packet->tq == 0) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet with tq equal 0\n");
		goto out;
	}

	router = orig_node_get_router(orig_node);
	if (router)
		router_router = orig_node_get_router(router->orig_node);

	/* avoid temporary routing loops */
	if (router && router_router &&
	    (compare_eth(router->addr, batman_ogm_packet->prev_sender)) &&
	    !(compare_eth(batman_ogm_packet->orig,
			  batman_ogm_packet->prev_sender)) &&
	    (compare_eth(router->addr, router_router->addr))) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n",
			ethhdr->h_source);
		goto out;
	}

	/* if sender is a direct neighbor the sender mac equals
	 * originator mac */
	orig_neigh_node = (is_single_hop_neigh ?
			   orig_node :
			   get_orig_node(bat_priv, ethhdr->h_source));
	if (!orig_neigh_node)
		goto out;

	orig_neigh_router = orig_node_get_router(orig_neigh_node);

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it */
	if (!is_single_hop_neigh && (!orig_neigh_router)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: OGM via unknown neighbor!\n");
		goto out_neigh;
	}

	is_bidirectional = bat_iv_ogm_calc_tq(orig_node, orig_neigh_node,
					      batman_ogm_packet, if_incoming);

	bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet);

	/* update ranking if it is not a duplicate or has the same
	 * seqno and similar ttl as the non-duplicate */
	if (is_bidirectional &&
	    (!is_duplicate ||
	     ((orig_node->last_real_seqno == batman_ogm_packet->seqno) &&
	      (orig_node->last_ttl - 3 <= batman_ogm_packet->header.ttl))))
		bat_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
				       batman_ogm_packet, if_incoming,
				       tt_buff, is_duplicate);

	/* is single hop (direct) neighbor */
	if (is_single_hop_neigh) {

		/* mark direct link on incoming interface */
		bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
				   is_single_hop_neigh, if_incoming);

		bat_dbg(DBG_BATMAN, bat_priv,
			"Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
		goto out_neigh;
	}

	/* multihop originator */
	if (!is_bidirectional) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: not received via bidirectional link\n");
		goto out_neigh;
	}

	if (is_duplicate) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: duplicate packet received\n");
		goto out_neigh;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: rebroadcast originator packet\n");
	bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
			   is_single_hop_neigh, if_incoming);

out_neigh:
	if ((orig_neigh_node) && (!is_single_hop_neigh))
		orig_node_free_ref(orig_neigh_node);
out:
	if (router)
		neigh_node_free_ref(router);
	if (router_router)
		neigh_node_free_ref(router_router);
	if (orig_neigh_router)
		neigh_node_free_ref(orig_neigh_router);

	orig_node_free_ref(orig_node);
}

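/* entry point for received OGM buffers: unpack the aggregate and process the
 * contained OGMs one by one */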
static void bat_iv_ogm_receive(struct hard_iface *if_incoming,
			       struct sk_buff *skb)
{
	struct batman_ogm_packet *batman_ogm_packet;
	struct ethhdr *ethhdr;
	int buff_pos = 0, packet_len;
	unsigned char *tt_buff, *packet_buff;

	packet_len = skb_headlen(skb);
	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	packet_buff = skb->data;
	batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;

	/* unpack the aggregated packets and process them one by one */
	do {
		/* network to host order for our 32bit seqno and the
		   16bit tt_crc */
		batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
		batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);

		tt_buff = packet_buff + buff_pos + BATMAN_OGM_HLEN;

		bat_iv_ogm_process(ethhdr, batman_ogm_packet,
				   tt_buff, if_incoming);

		buff_pos += BATMAN_OGM_HLEN +
			    tt_len(batman_ogm_packet->tt_num_changes);

		batman_ogm_packet = (struct batman_ogm_packet *)
			(packet_buff + buff_pos);
	} while (bat_iv_ogm_aggr_packet(buff_pos, packet_len,
					batman_ogm_packet->tt_num_changes));
}

static struct bat_algo_ops batman_iv __read_mostly = {
	.name = "BATMAN IV",
	.bat_iface_enable = bat_iv_ogm_iface_enable,
	.bat_iface_disable = bat_iv_ogm_iface_disable,
	.bat_primary_iface_set = bat_iv_ogm_primary_iface_set,
	.bat_ogm_update_mac = bat_iv_ogm_update_mac,
	.bat_ogm_schedule = bat_iv_ogm_schedule,
	.bat_ogm_emit = bat_iv_ogm_emit,
	.bat_ogm_receive = bat_iv_ogm_receive,
};

int __init bat_iv_init(void)
{
	return bat_algo_register(&batman_iv);
}