/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "routing.h"
#include "send.h"
#include "hash.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "ring_buffer.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "unicast.h"

void slide_own_bcast_window(struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	unsigned long *word;
	int i;
	size_t word_index;

	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
			orig_node = bucket->data;
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			word_index = batman_if->if_num * NUM_WORDS;
			word = &(orig_node->bcast_own[word_index]);

			bit_get_packet(bat_priv, word, 1, 0);
			orig_node->bcast_own_sum[batman_if->if_num] =
				bit_packet_count(word);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);
}

static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
		       unsigned char *hna_buff, int hna_buff_len)
{
	if ((hna_buff_len != orig_node->hna_buff_len) ||
	    ((hna_buff_len > 0) &&
	     (orig_node->hna_buff_len > 0) &&
	     (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {

		if (orig_node->hna_buff_len > 0)
			hna_global_del_orig(bat_priv, orig_node,
					    "originator changed hna");

		if ((hna_buff_len > 0) && (hna_buff))
			hna_global_add_orig(bat_priv, orig_node,
					    hna_buff, hna_buff_len);
	}
}

static void update_route(struct bat_priv *bat_priv,
			 struct orig_node *orig_node,
			 struct neigh_node *neigh_node,
			 unsigned char *hna_buff, int hna_buff_len)
{
	struct neigh_node *neigh_node_tmp;

	/* route deleted */
	if ((orig_node->router) && (!neigh_node)) {

		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
			orig_node->orig);
		hna_global_del_orig(bat_priv, orig_node,
				    "originator timed out");

	/* route added */
	} else if ((!orig_node->router) && (neigh_node)) {

		bat_dbg(DBG_ROUTES, bat_priv,
			"Adding route towards: %pM (via %pM)\n",
			orig_node->orig, neigh_node->addr);
		hna_global_add_orig(bat_priv, orig_node,
				    hna_buff, hna_buff_len);

	/* route changed */
	} else {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Changing route towards: %pM "
			"(now via %pM - was via %pM)\n",
			orig_node->orig, neigh_node->addr,
			orig_node->router->addr);
	}

	if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
		neigh_node = NULL;
	neigh_node_tmp = orig_node->router;
	orig_node->router = neigh_node;
	if (neigh_node_tmp)
		neigh_node_free_ref(neigh_node_tmp);
}


void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
		   struct neigh_node *neigh_node, unsigned char *hna_buff,
		   int hna_buff_len)
{

	if (!orig_node)
		return;

	if (orig_node->router != neigh_node)
		update_route(bat_priv, orig_node, neigh_node,
			     hna_buff, hna_buff_len);
	/* may be just HNA changed */
	else
		update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
}

static int is_bidirectional_neigh(struct orig_node *orig_node,
				  struct orig_node *orig_neigh_node,
				  struct batman_packet *batman_packet,
				  struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct hlist_node *node;
	unsigned char total_count;
	uint8_t orig_eq_count, neigh_rq_count, tq_own;
	int tq_asym_penalty, ret = 0;

	if (orig_node == orig_neigh_node) {
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_node->neigh_list, list) {

			if (compare_eth(tmp_neigh_node->addr,
					orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			goto unlock;

		if (!atomic_inc_not_zero(&neigh_node->refcount)) {
			neigh_node = NULL;
			goto unlock;
		}

		rcu_read_unlock();

		neigh_node->last_valid = jiffies;
	} else {
		/* find packet count of corresponding one hop neighbor */
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_neigh_node->neigh_list, list) {

			if (compare_eth(tmp_neigh_node->addr,
					orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_neigh_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			goto unlock;

		if (!atomic_inc_not_zero(&neigh_node->refcount)) {
			neigh_node = NULL;
			goto unlock;
		}

		rcu_read_unlock();
	}

	orig_node->last_valid = jiffies;

	spin_lock_bh(&orig_node->ogm_cnt_lock);
	orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
	neigh_rq_count = neigh_node->real_packet_count;
	spin_unlock_bh(&orig_node->ogm_cnt_lock);

	/* take care not to get a value bigger than 100 % */
	total_count = (orig_eq_count > neigh_rq_count ?
		       neigh_rq_count : orig_eq_count);

	/* if we have too few packets (too little data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;

	/*
	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE. This barely
	 * affects nearly-symmetric links but punishes asymmetric
	 * links more heavily. The result is a value between 0 and
	 * TQ_MAX_VALUE.
	 */
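	/*
	 * Worked example (assuming the usual defaults of
	 * TQ_LOCAL_WINDOW_SIZE = 64 and TQ_MAX_VALUE = 255): a neighbor
	 * from which we received 48 of the last 64 OGMs keeps a penalty
	 * factor of about 252/255, i.e. almost no punishment, while a
	 * neighbor with only 16 of 64 received OGMs drops to roughly
	 * 148/255 before tq_own is even taken into account.
	 */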
	tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
			  (TQ_LOCAL_WINDOW_SIZE *
			   TQ_LOCAL_WINDOW_SIZE *
			   TQ_LOCAL_WINDOW_SIZE);

	batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
			     (TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: "
		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
		"total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	return ret;
}

/* caller must hold the neigh_list_lock */
void bonding_candidate_del(struct orig_node *orig_node,
			   struct neigh_node *neigh_node)
{
	/* this neighbor is not part of our candidate list */
	if (list_empty(&neigh_node->bonding_list))
		goto out;

	list_del_rcu(&neigh_node->bonding_list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);
	neigh_node_free_ref(neigh_node);
	atomic_dec(&orig_node->bond_candidates);

out:
	return;
}

static void bonding_candidate_add(struct orig_node *orig_node,
				  struct neigh_node *neigh_node)
{
	struct hlist_node *node;
	struct neigh_node *tmp_neigh_node;
	uint8_t best_tq, interference_candidate = 0;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* only consider if it has the same primary address ... */
	if (!compare_eth(orig_node->orig,
			 neigh_node->orig_node->primary_addr))
		goto candidate_del;

	if (!orig_node->router)
		goto candidate_del;

	best_tq = orig_node->router->tq_avg;

	/* ... and is good enough to be considered */
	if (neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
		goto candidate_del;

	/**
	 * check if we have another candidate with the same mac address or
	 * interface. If we do, we won't select this candidate because of
	 * possible interference.
	 */
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		if (tmp_neigh_node == neigh_node)
			continue;

		/* we only care if the other candidate is even
		 * considered as candidate. */
		if (list_empty(&tmp_neigh_node->bonding_list))
			continue;

		if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
		    (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
			interference_candidate = 1;
			break;
		}
	}

	/* don't care further if it is an interference candidate */
	if (interference_candidate)
		goto candidate_del;

	/* this neighbor already is part of our candidate list */
	if (!list_empty(&neigh_node->bonding_list))
		goto out;

	if (!atomic_inc_not_zero(&neigh_node->refcount))
		goto out;

	list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
	atomic_inc(&orig_node->bond_candidates);
	goto out;

candidate_del:
	bonding_candidate_del(orig_node, neigh_node);

out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return;
}

/* copy primary address for bonding */
static void bonding_save_primary(struct orig_node *orig_node,
				 struct orig_node *orig_neigh_node,
				 struct batman_packet *batman_packet)
{
	if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
		return;

	memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
}

static void update_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node,
			struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			struct batman_if *if_incoming,
			unsigned char *hna_buff, int hna_buff_len,
			char is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct orig_node *orig_node_tmp;
	struct hlist_node *node;
	int tmp_hna_buff_len;
	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;

	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
		"Searching and updating originator entry of received packet\n");

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming) &&
		    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
			if (neigh_node)
				neigh_node_free_ref(neigh_node);
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = create_neighbor(orig_node, orig_tmp,
					     ethhdr->h_source, if_incoming);

		kref_put(&orig_tmp->refcount, orig_node_free_ref);
		if (!neigh_node)
			goto unlock;

		if (!atomic_inc_not_zero(&neigh_node->refcount)) {
			neigh_node = NULL;
			goto unlock;
		}
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	rcu_read_unlock();

	orig_node->flags = batman_packet->flags;
	neigh_node->last_valid = jiffies;

	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_packet->ttl;
		neigh_node->last_ttl = batman_packet->ttl;
	}

	bonding_candidate_add(orig_node, neigh_node);

	tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
			    batman_packet->num_hna * ETH_ALEN : hna_buff_len);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	if (orig_node->router == neigh_node)
		goto update_hna;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if ((orig_node->router) &&
	    (orig_node->router->tq_avg > neigh_node->tq_avg))
		goto update_hna;

	/* if the TQ is the same and the link is not more symmetric we
	 * won't consider it either */
	if ((orig_node->router) &&
	    (neigh_node->tq_avg == orig_node->router->tq_avg)) {
		orig_node_tmp = orig_node->router->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_orig =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		orig_node_tmp = neigh_node->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_neigh =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
			goto update_hna;
	}

	update_routes(bat_priv, orig_node, neigh_node,
		      hna_buff, tmp_hna_buff_len);
	goto update_gw;

update_hna:
	update_routes(bat_priv, orig_node, orig_node->router,
		      hna_buff, tmp_hna_buff_len);

update_gw:
	if (orig_node->gw_flags != batman_packet->gw_flags)
		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);

	orig_node->gw_flags = batman_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
}

/* checks whether the host restarted and is in the protection time.
 * returns:
 *  0 if the packet is to be accepted
 *  1 if the packet is to be ignored.
 */
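/*
 * Example, assuming the defaults of TQ_LOCAL_WINDOW_SIZE = 64 and a
 * RESET_PROTECTION_MS of 30 seconds: a neighbor that reboots and restarts
 * its sequence numbers at zero produces a large negative seq_num_diff.
 * The first such packet seen after the protection time has expired re-arms
 * last_reset and is accepted; further out-of-range packets arriving within
 * the protection time are ignored.
 */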
static int window_protected(struct bat_priv *bat_priv,
			    int32_t seq_num_diff,
			    unsigned long *last_reset)
{
	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
		|| (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
		if (time_after(jiffies, *last_reset +
			msecs_to_jiffies(RESET_PROTECTION_MS))) {

			*last_reset = jiffies;
			bat_dbg(DBG_BATMAN, bat_priv,
				"old packet received, start protection\n");

			return 0;
		} else
			return 1;
	}
	return 0;
}

/* processes a batman packet for all interfaces, adjusts the sequence number and
 * finds out whether it is a duplicate.
 * returns:
 *   1 the packet is a duplicate
 *   0 the packet has not yet been received
 *  -1 the packet is old and has been received while the seqno window
 *     was protected. Caller should drop it.
 */
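/*
 * For instance, an OGM whose seqno is two ahead of last_real_seqno gives
 * seq_diff = 2: get_bit_status() reports whether this seqno was already
 * seen from a neighbor within the sliding window, bit_get_packet() shifts
 * every neighbor's window by seq_diff (setting the bit only for the
 * neighbor the OGM actually came in from), and real_packet_count is then
 * recomputed from the window.
 */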
static char count_real_packets(struct ethhdr *ethhdr,
			       struct batman_packet *batman_packet,
			       struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	struct hlist_node *node;
	char is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark, ret = -1;

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return 0;

	spin_lock_bh(&orig_node->ogm_cnt_lock);
	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;

	/* signal to the caller that the packet is to be dropped. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
					       orig_node->last_real_seqno,
					       batman_packet->seqno);

		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);

		tmp_neigh_node->real_packet_count =
			bit_packet_count(tmp_neigh_node->real_bits);
	}
	rcu_read_unlock();

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %d, new %d\n",
			orig_node->last_real_seqno, batman_packet->seqno);
		orig_node->last_real_seqno = batman_packet->seqno;
	}

	ret = is_duplicate;

out:
	spin_unlock_bh(&orig_node->ogm_cnt_lock);
	kref_put(&orig_node->refcount, orig_node_free_ref);
	return ret;
}

void receive_bat_packet(struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			unsigned char *hna_buff, int hna_buff_len,
			struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct batman_if *batman_if;
	struct orig_node *orig_neigh_node, *orig_node;
	char has_directlink_flag;
	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
	char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
	char is_duplicate;
	uint32_t if_incoming_seqno;

	/* Silently drop when the batman packet is actually not a
	 * correct packet.
	 *
	 * This might happen if a packet is padded (e.g. Ethernet has a
	 * minimum frame length of 64 bytes) and the aggregation interprets
	 * it as an additional length.
	 *
	 * TODO: A more sane solution would be to have a bit in the
	 * batman_packet to detect whether the packet is the last
	 * packet in an aggregation. Here we expect that the padding
	 * is always zero (or not 0x01)
	 */
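	/*
	 * For example, a short aggregate padded up to the Ethernet minimum
	 * frame length leaves trailing zero bytes behind the last OGM; the
	 * aggregation code would hand those bytes in as a further "packet"
	 * whose packet_type reads as zero, so the check below drops it.
	 */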
	if (batman_packet->packet_type != BAT_PACKET)
		return;

	/* could be changed by schedule_own_packet() */
	if_incoming_seqno = atomic_read(&if_incoming->seqno);

	has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	is_single_hop_neigh = (compare_eth(ethhdr->h_source,
					   batman_packet->orig) ? 1 : 0);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Received BATMAN packet via NB: %pM, IF: %s [%pM] "
		"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
		"TTL %d, V %d, IDF %d)\n",
		ethhdr->h_source, if_incoming->net_dev->name,
		if_incoming->net_dev->dev_addr, batman_packet->orig,
		batman_packet->prev_sender, batman_packet->seqno,
		batman_packet->tq, batman_packet->ttl, batman_packet->version,
		has_directlink_flag);

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if (batman_if->if_status != IF_ACTIVE)
			continue;

		if (batman_if->soft_iface != if_incoming->soft_iface)
			continue;

		if (compare_eth(ethhdr->h_source,
				batman_if->net_dev->dev_addr))
			is_my_addr = 1;

		if (compare_eth(batman_packet->orig,
				batman_if->net_dev->dev_addr))
			is_my_orig = 1;

		if (compare_eth(batman_packet->prev_sender,
				batman_if->net_dev->dev_addr))
			is_my_oldorig = 1;

		if (compare_eth(ethhdr->h_source, broadcast_addr))
			is_broadcast = 1;
	}
	rcu_read_unlock();

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		return;
	}

	if (is_my_addr) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: received my own broadcast (sender: %pM"
			")\n",
			ethhdr->h_source);
		return;
	}

	if (is_broadcast) {
		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
		"ignoring all packets with broadcast source addr (sender: %pM"
		")\n", ethhdr->h_source);
		return;
	}

	if (is_my_orig) {
		unsigned long *word;
		int offset;

		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_neigh_node)
			return;

		/* neighbor has to indicate direct link and it has to
		 * come via the corresponding interface */
		/* if received seqno equals last send seqno save new
		 * seqno for bidirectional check */
		if (has_directlink_flag &&
		    compare_eth(if_incoming->net_dev->dev_addr,
				batman_packet->orig) &&
		    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
			offset = if_incoming->if_num * NUM_WORDS;

			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
			word = &(orig_neigh_node->bcast_own[offset]);
			bit_mark(word, 0);
			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
				bit_packet_count(word);
			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
		}

		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"originator packet from myself (via neighbor)\n");
		kref_put(&orig_neigh_node->refcount, orig_node_free_ref);
		return;
	}

	if (is_my_oldorig) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast echos (sender: "
			"%pM)\n", ethhdr->h_source);
		return;
	}

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return;

	is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);

	if (is_duplicate == -1) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: packet within seqno protection time "
			"(sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	if (batman_packet->tq == 0) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet with tq equal 0\n");
		goto out;
	}

	/* avoid temporary routing loops */
	if ((orig_node->router) &&
	    (orig_node->router->orig_node->router) &&
	    (compare_eth(orig_node->router->addr,
			 batman_packet->prev_sender)) &&
	    !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
	    (compare_eth(orig_node->router->addr,
			 orig_node->router->orig_node->router->addr))) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast packets that "
			"may make me loop (sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	/* if sender is a direct neighbor the sender mac equals
	 * originator mac */
	orig_neigh_node = (is_single_hop_neigh ?
			   orig_node :
			   get_orig_node(bat_priv, ethhdr->h_source));
	if (!orig_neigh_node)
		goto out_neigh;

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it */
	if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: OGM via unknown neighbor!\n");
		goto out_neigh;
	}

	is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
						  batman_packet, if_incoming);

	bonding_save_primary(orig_node, orig_neigh_node, batman_packet);

	/* update ranking if it is not a duplicate or has the same
	 * seqno and similar ttl as the non-duplicate */
	if (is_bidirectional &&
	    (!is_duplicate ||
	     ((orig_node->last_real_seqno == batman_packet->seqno) &&
	      (orig_node->last_ttl - 3 <= batman_packet->ttl))))
		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
			    if_incoming, hna_buff, hna_buff_len, is_duplicate);

	/* is single hop (direct) neighbor */
	if (is_single_hop_neigh) {

		/* mark direct link on incoming interface */
		schedule_forward_packet(orig_node, ethhdr, batman_packet,
					1, hna_buff_len, if_incoming);

		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
			"rebroadcast neighbor packet with direct link flag\n");
		goto out_neigh;
	}

	/* multihop originator */
	if (!is_bidirectional) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: not received via bidirectional link\n");
		goto out_neigh;
	}

	if (is_duplicate) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: duplicate packet received\n");
		goto out_neigh;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: rebroadcast originator packet\n");
	schedule_forward_packet(orig_node, ethhdr, batman_packet,
				0, hna_buff_len, if_incoming);

out_neigh:
	if (!is_single_hop_neigh)
		kref_put(&orig_neigh_node->refcount, orig_node_free_ref);
out:
	kref_put(&orig_node->refcount, orig_node_free_ref);
}

int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, 0) < 0)
		return NET_RX_DROP;

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	spin_lock_bh(&bat_priv->orig_hash_lock);
	receive_aggr_bat_packet(ethhdr,
				skb->data,
				skb_headlen(skb),
				batman_if);
	spin_unlock_bh(&bat_priv->orig_hash_lock);

	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct icmp_packet_rr *icmp_packet;
	struct batman_if *batman_if;
	uint8_t dstaddr[ETH_ALEN];
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		goto out;
	}

	if (!bat_priv->primary_if)
		goto out;

	/* answer echo request (ping) */
	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
						   compare_orig, choose_orig,
						   icmp_packet->orig));

	if (!orig_node)
		goto unlock;

	kref_get(&orig_node->refcount);
	neigh_node = orig_node->router;

	if (!neigh_node)
		goto unlock;

	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
		neigh_node = NULL;
		goto unlock;
	}

	rcu_read_unlock();

	/* don't lock while sending the packets ... we therefore
	 * copy the required data before sending */
	batman_if = orig_node->router->if_incoming;
	memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig,
	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = ECHO_REPLY;
	icmp_packet->ttl = TTL;

	send_skb_packet(skb, batman_if, dstaddr);
	ret = NET_RX_SUCCESS;
	goto out;

unlock:
	rcu_read_unlock();
	spin_unlock_bh(&bat_priv->orig_hash_lock);
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		kref_put(&orig_node->refcount, orig_node_free_ref);
	return ret;
}

static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct icmp_packet *icmp_packet;
	struct batman_if *batman_if;
	uint8_t dstaddr[ETH_ALEN];
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to "
			 "%pM: ttl exceeded\n", icmp_packet->orig,
			 icmp_packet->dst);
		goto out;
	}

	if (!bat_priv->primary_if)
		goto out;

	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       icmp_packet->orig));

	if (!orig_node)
		goto unlock;

	kref_get(&orig_node->refcount);
	neigh_node = orig_node->router;

	if (!neigh_node)
		goto unlock;

	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
		neigh_node = NULL;
		goto unlock;
	}

	rcu_read_unlock();

	/* don't lock while sending the packets ... we therefore
	 * copy the required data before sending */
	batman_if = orig_node->router->if_incoming;
	memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig,
	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = TTL_EXCEEDED;
	icmp_packet->ttl = TTL;

	send_skb_packet(skb, batman_if, dstaddr);
	ret = NET_RX_SUCCESS;
	goto out;

unlock:
	rcu_read_unlock();
	spin_unlock_bh(&bat_priv->orig_hash_lock);
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		kref_put(&orig_node->refcount, orig_node_free_ref);
	return ret;
}


int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct icmp_packet_rr *icmp_packet;
	struct ethhdr *ethhdr;
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct batman_if *batman_if;
	int hdr_size = sizeof(struct icmp_packet);
	uint8_t dstaddr[ETH_ALEN];
	int ret = NET_RX_DROP;

	/**
	 * we truncate all incoming icmp packets if they don't match our size
	 */
	if (skb->len >= sizeof(struct icmp_packet_rr))
		hdr_size = sizeof(struct icmp_packet_rr);

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add record route information if not full */
	if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
	    (icmp_packet->rr_cur < BAT_RR_LEN)) {
		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
		       ethhdr->h_dest, ETH_ALEN);
		icmp_packet->rr_cur++;
	}

	/* packet for me */
	if (is_my_mac(icmp_packet->dst))
		return recv_my_icmp_packet(bat_priv, skb, hdr_size);

	/* TTL exceeded */
	if (icmp_packet->ttl < 2)
		return recv_icmp_ttl_exceeded(bat_priv, skb);

	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       icmp_packet->dst));
	if (!orig_node)
		goto unlock;

	kref_get(&orig_node->refcount);
	neigh_node = orig_node->router;

	if (!neigh_node)
		goto unlock;

	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
		neigh_node = NULL;
		goto unlock;
	}

	rcu_read_unlock();

	/* don't lock while sending the packets ... we therefore
	 * copy the required data before sending */
	batman_if = orig_node->router->if_incoming;
	memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* decrement ttl */
	icmp_packet->ttl--;

	/* route it */
	send_skb_packet(skb, batman_if, dstaddr);
	ret = NET_RX_SUCCESS;
	goto out;

unlock:
	rcu_read_unlock();
	spin_unlock_bh(&bat_priv->orig_hash_lock);
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		kref_put(&orig_node->refcount, orig_node_free_ref);
	return ret;
}

/* find a suitable router for this originator, and use
 * bonding if possible. increases the found neighbor's
 * refcount.*/
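/*
 * In short: with bonding enabled the candidates on the primary
 * originator's bond_list are used in a round-robin fashion (skipping the
 * interface the packet arrived on); with bonding disabled the remaining
 * candidate with the best tq_avg is chosen instead. Both paths fall back
 * to the first candidate, and the plain orig_node->router is returned
 * whenever fewer than two candidates or no primary address are known.
 */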
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct batman_if *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router, *first_candidate, *tmp_neigh_node;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	if (!orig_node->router)
		return NULL;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	rcu_read_lock();
	/* select default router to output */
	router = orig_node->router;
	router_orig = orig_node->router->orig_node;
	if (!router_orig || !atomic_inc_not_zero(&router->refcount)) {
		rcu_read_unlock();
		return NULL;
	}

	if ((!recv_if) && (!bonding_enabled))
		goto return_router;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (compare_eth(router_orig->primary_addr, zero_mac))
		goto return_router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */

	if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
		primary_orig_node = router_orig;
	} else {
		primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig,
					      choose_orig,
					      router_orig->primary_addr);
		if (!primary_orig_node)
			goto return_router;
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (atomic_read(&primary_orig_node->bond_candidates) < 2)
		goto return_router;


	/* all nodes between should choose a candidate which
	 * is not on the interface where the packet came
	 * in. */

	neigh_node_free_ref(router);
	first_candidate = NULL;
	router = NULL;

	if (bonding_enabled) {
		/* in the bonding case, send the packets in a round
		 * robin fashion over the remaining interfaces. */

		list_for_each_entry_rcu(tmp_neigh_node,
				&primary_orig_node->bond_list, bonding_list) {
			if (!first_candidate)
				first_candidate = tmp_neigh_node;
			/* recv_if == NULL on the first node. */
			if (tmp_neigh_node->if_incoming != recv_if &&
			    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
				router = tmp_neigh_node;
				break;
			}
		}

		/* use the first candidate if nothing was found. */
		if (!router && first_candidate &&
		    atomic_inc_not_zero(&first_candidate->refcount))
			router = first_candidate;

		if (!router) {
			rcu_read_unlock();
			return NULL;
		}

		/* selected should point to the next element
		 * after the current router */
		spin_lock_bh(&primary_orig_node->neigh_list_lock);
		/* this is a list_move(), which unfortunately
		 * does not exist as rcu version */
		list_del_rcu(&primary_orig_node->bond_list);
		list_add_rcu(&primary_orig_node->bond_list,
			     &router->bonding_list);
		spin_unlock_bh(&primary_orig_node->neigh_list_lock);

	} else {
		/* if bonding is disabled, use the best of the
		 * remaining candidates which are not using
		 * this interface. */
		list_for_each_entry_rcu(tmp_neigh_node,
			&primary_orig_node->bond_list, bonding_list) {
			if (!first_candidate)
				first_candidate = tmp_neigh_node;

			/* recv_if == NULL on the first node. */
			if (tmp_neigh_node->if_incoming == recv_if)
				continue;

			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
				continue;

			/* if we don't have a router yet
			 * or this one is better, choose it. */
			if ((!router) ||
			    (tmp_neigh_node->tq_avg > router->tq_avg)) {
				/* decrement refcount of
				 * previously selected router */
				if (router)
					neigh_node_free_ref(router);

				router = tmp_neigh_node;
				atomic_inc_not_zero(&router->refcount);
			}

			neigh_node_free_ref(tmp_neigh_node);
		}

		/* use the first candidate if nothing was found. */
		if (!router && first_candidate &&
		    atomic_inc_not_zero(&first_candidate->refcount))
			router = first_candidate;
	}
return_router:
	rcu_read_unlock();
	return router;
}

static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return -1;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		return -1;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return -1;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return -1;

	return 0;
}

int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
			 int hdr_size)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct batman_if *batman_if;
	uint8_t dstaddr[ETH_ALEN];
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret = NET_RX_DROP;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to "
			 "%pM: ttl exceeded\n", ethhdr->h_source,
			 unicast_packet->dest);
		goto out;
	}

	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       unicast_packet->dest));
	if (!orig_node)
		goto unlock;

	kref_get(&orig_node->refcount);
	rcu_read_unlock();

	/* find_router() increases neigh_node's refcount if found. */
	neigh_node = find_router(bat_priv, orig_node, recv_if);

	if (!neigh_node) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		goto out;
	}

	/* don't lock while sending the packets ... we therefore
	 * copy the required data before sending */
	batman_if = neigh_node->if_incoming;
	memcpy(dstaddr, neigh_node->addr, ETH_ALEN);
	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (unicast_packet->packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > batman_if->net_dev->mtu)
		return frag_send_skb(skb, bat_priv, batman_if,
				     dstaddr);

	if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, batman_if->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			goto out;

		/* packet was buffered for late merge */
		if (!new_skb) {
			ret = NET_RX_SUCCESS;
			goto out;
		}

		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->ttl--;

	/* route it */
	send_skb_packet(skb, batman_if, dstaddr);
	ret = NET_RX_SUCCESS;
	goto out;

unlock:
	rcu_read_unlock();
	spin_unlock_bh(&bat_priv->orig_hash_lock);
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		kref_put(&orig_node->refcount, orig_node_free_ref);
	return ret;
}
1418
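/*
 * recv_unicast_packet() - handler for incoming BAT_UNICAST packets: deliver
 * the payload to the soft interface if the packet is addressed to us,
 * otherwise forward it via route_unicast_packet().
 */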
1419int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
1420{
1421 struct unicast_packet *unicast_packet;
1422 int hdr_size = sizeof(struct unicast_packet);
1423
1424 if (check_unicast_packet(skb, hdr_size) < 0)
1425 return NET_RX_DROP;
1426
1427 unicast_packet = (struct unicast_packet *)skb->data;
1428
1429 /* packet for me */
1430 if (is_my_mac(unicast_packet->dest)) {
1431 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
1432 return NET_RX_SUCCESS;
1433 }
1434
1435 return route_unicast_packet(skb, recv_if, hdr_size);
1436}
1437
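/*
 * recv_ucast_frag_packet() - handler for incoming BAT_UNICAST_FRAG packets:
 * if we are the destination, try to reassemble the fragment (a fragment
 * whose counterpart has not arrived yet is buffered for a later merge),
 * otherwise forward it like a regular unicast packet.
 */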
1438int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
1439{
1440 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1441 struct unicast_frag_packet *unicast_packet;
1442 int hdr_size = sizeof(struct unicast_frag_packet);
1443 struct sk_buff *new_skb = NULL;
1444 int ret;
1445
1446 if (check_unicast_packet(skb, hdr_size) < 0)
1447 return NET_RX_DROP;
1448
1449 unicast_packet = (struct unicast_frag_packet *)skb->data;
1450
1451 /* packet for me */
1452 if (is_my_mac(unicast_packet->dest)) {
1453
1454 ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
1455
1456 if (ret == NET_RX_DROP)
1457 return NET_RX_DROP;
1458
1459 /* packet was buffered for late merge */
1460 if (!new_skb)
1461 return NET_RX_SUCCESS;
1462
1463 interface_rx(recv_if->soft_iface, new_skb, recv_if,
1464 sizeof(struct unicast_packet));
1465 return NET_RX_SUCCESS;
1466 }
1467
1468 return route_unicast_packet(skb, recv_if, hdr_size);
1469}
1470
1471
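/*
 * recv_bcast_packet() - handler for incoming BAT_BCAST packets: sanity check
 * the addresses and TTL, drop duplicates and out-of-window sequence numbers
 * using the per-originator broadcast window, then queue the packet for
 * rebroadcast and deliver a copy to the soft interface.
 */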
1472int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
1473{
1474 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
Marek Lindnerf3e00082011-01-25 21:52:11 +00001475 struct orig_node *orig_node = NULL;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001476 struct bcast_packet *bcast_packet;
1477 struct ethhdr *ethhdr;
1478 int hdr_size = sizeof(struct bcast_packet);
Marek Lindnerf3e00082011-01-25 21:52:11 +00001479 int ret = NET_RX_DROP;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001480 int32_t seq_diff;
1481
 1482	/* drop packet if it lacks the necessary minimum size */
1483 if (unlikely(!pskb_may_pull(skb, hdr_size)))
Marek Lindnerf3e00082011-01-25 21:52:11 +00001484 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001485
1486 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1487
1488 /* packet with broadcast indication but unicast recipient */
1489 if (!is_broadcast_ether_addr(ethhdr->h_dest))
Marek Lindnerf3e00082011-01-25 21:52:11 +00001490 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001491
1492 /* packet with broadcast sender address */
1493 if (is_broadcast_ether_addr(ethhdr->h_source))
Marek Lindnerf3e00082011-01-25 21:52:11 +00001494 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001495
1496 /* ignore broadcasts sent by myself */
1497 if (is_my_mac(ethhdr->h_source))
Marek Lindnerf3e00082011-01-25 21:52:11 +00001498 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001499
1500 bcast_packet = (struct bcast_packet *)skb->data;
1501
1502 /* ignore broadcasts originated by myself */
1503 if (is_my_mac(bcast_packet->orig))
Marek Lindnerf3e00082011-01-25 21:52:11 +00001504 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001505
1506 if (bcast_packet->ttl < 2)
Marek Lindnerf3e00082011-01-25 21:52:11 +00001507 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001508
1509 spin_lock_bh(&bat_priv->orig_hash_lock);
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001510 rcu_read_lock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001511 orig_node = ((struct orig_node *)
1512 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
1513 bcast_packet->orig));
Marek Lindnerf3e00082011-01-25 21:52:11 +00001514
1515 if (!orig_node)
1516 goto rcu_unlock;
1517
1518 kref_get(&orig_node->refcount);
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001519 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001520
Marek Lindnerf3e00082011-01-25 21:52:11 +00001521 spin_lock_bh(&orig_node->bcast_seqno_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001522
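	/* orig_node->bcast_bits is a sliding window over the most recently
	 * seen broadcast seqnos: get_bit_status() tells us whether this
	 * seqno was already received, bit_get_packet() below records it and
	 * advances the window if necessary */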
1523 /* check whether the packet is a duplicate */
Marek Lindnerf3e00082011-01-25 21:52:11 +00001524 if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
1525 ntohl(bcast_packet->seqno)))
1526 goto spin_unlock;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001527
1528 seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
1529
1530 /* check whether the packet is old and the host just restarted. */
1531 if (window_protected(bat_priv, seq_diff,
Marek Lindnerf3e00082011-01-25 21:52:11 +00001532 &orig_node->bcast_seqno_reset))
1533 goto spin_unlock;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001534
1535 /* mark broadcast in flood history, update window position
1536 * if required. */
1537 if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
1538 orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
1539
Marek Lindnerf3e00082011-01-25 21:52:11 +00001540 spin_unlock_bh(&orig_node->bcast_seqno_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001541 spin_unlock_bh(&bat_priv->orig_hash_lock);
Marek Lindnerf3e00082011-01-25 21:52:11 +00001542
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001543 /* rebroadcast packet */
1544 add_bcast_packet_to_list(bat_priv, skb);
1545
1546 /* broadcast for me */
1547 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
Marek Lindnerf3e00082011-01-25 21:52:11 +00001548 ret = NET_RX_SUCCESS;
1549 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001550
Marek Lindnerf3e00082011-01-25 21:52:11 +00001551rcu_unlock:
1552 rcu_read_unlock();
1553 spin_unlock_bh(&bat_priv->orig_hash_lock);
1554 goto out;
1555spin_unlock:
1556 spin_unlock_bh(&orig_node->bcast_seqno_lock);
1557 spin_unlock_bh(&bat_priv->orig_hash_lock);
1558out:
1559 if (orig_node)
1560 kref_put(&orig_node->refcount, orig_node_free_ref);
1561 return ret;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001562}
1563
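/*
 * recv_vis_packet() - handler for incoming vis packets: accept only frames
 * addressed to us that we did not originate or forward ourselves and hand
 * them to the vis server/client code depending on their type. The vis code
 * copies what it needs, so NET_RX_DROP is returned to have the caller free
 * the skb.
 */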
1564int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
1565{
1566 struct vis_packet *vis_packet;
1567 struct ethhdr *ethhdr;
1568 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1569 int hdr_size = sizeof(struct vis_packet);
1570
1571 /* keep skb linear */
1572 if (skb_linearize(skb) < 0)
1573 return NET_RX_DROP;
1574
1575 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1576 return NET_RX_DROP;
1577
1578 vis_packet = (struct vis_packet *)skb->data;
1579 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1580
1581 /* not for me */
1582 if (!is_my_mac(ethhdr->h_dest))
1583 return NET_RX_DROP;
1584
1585 /* ignore own packets */
1586 if (is_my_mac(vis_packet->vis_orig))
1587 return NET_RX_DROP;
1588
1589 if (is_my_mac(vis_packet->sender_orig))
1590 return NET_RX_DROP;
1591
1592 switch (vis_packet->vis_type) {
1593 case VIS_TYPE_SERVER_SYNC:
1594 receive_server_sync_packet(bat_priv, vis_packet,
1595 skb_headlen(skb));
1596 break;
1597
1598 case VIS_TYPE_CLIENT_UPDATE:
1599 receive_client_update_packet(bat_priv, vis_packet,
1600 skb_headlen(skb));
1601 break;
1602
1603 default: /* ignore unknown packet */
1604 break;
1605 }
1606
 1607	/* We take a copy of the data in the packet, so we should
 1608	 * always free the skbuff. */
1609 return NET_RX_DROP;
1610}