/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "routing.h"
#include "send.h"
#include "hash.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "ring_buffer.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "unicast.h"

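/* slide the own broadcast window of every originator known on this
 * interface and refresh the cached bcast_own_sum packet counts */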
void slide_own_bcast_window(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	unsigned long *word;
	int i;
	size_t word_index;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			word_index = hard_iface->if_num * NUM_WORDS;
			word = &(orig_node->bcast_own[word_index]);

			bit_get_packet(bat_priv, word, 1, 0);
			orig_node->bcast_own_sum[hard_iface->if_num] =
				bit_packet_count(word);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);
		}
		rcu_read_unlock();
	}
}

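/* update the global HNA entries announced by an originator when the
 * received HNA buffer differs from the one we currently store */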
static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
		       unsigned char *hna_buff, int hna_buff_len)
{
	if ((hna_buff_len != orig_node->hna_buff_len) ||
	    ((hna_buff_len > 0) &&
	     (orig_node->hna_buff_len > 0) &&
	     (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {

		if (orig_node->hna_buff_len > 0)
			hna_global_del_orig(bat_priv, orig_node,
					    "originator changed hna");

		if ((hna_buff_len > 0) && (hna_buff))
			hna_global_add_orig(bat_priv, orig_node,
					    hna_buff, hna_buff_len);
	}
}

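/* switch orig_node->router to neigh_node, covering the route added,
 * deleted and changed cases and adjusting the refcounts accordingly */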
static void update_route(struct bat_priv *bat_priv,
			 struct orig_node *orig_node,
			 struct neigh_node *neigh_node,
			 unsigned char *hna_buff, int hna_buff_len)
{
	struct neigh_node *neigh_node_tmp;

	/* route deleted */
	if ((orig_node->router) && (!neigh_node)) {

		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
			orig_node->orig);
		hna_global_del_orig(bat_priv, orig_node,
				    "originator timed out");

	/* route added */
	} else if ((!orig_node->router) && (neigh_node)) {

		bat_dbg(DBG_ROUTES, bat_priv,
			"Adding route towards: %pM (via %pM)\n",
			orig_node->orig, neigh_node->addr);
		hna_global_add_orig(bat_priv, orig_node,
				    hna_buff, hna_buff_len);

	/* route changed */
	} else {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Changing route towards: %pM "
			"(now via %pM - was via %pM)\n",
			orig_node->orig, neigh_node->addr,
			orig_node->router->addr);
	}

	if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
		neigh_node = NULL;
	neigh_node_tmp = orig_node->router;
	orig_node->router = neigh_node;
	if (neigh_node_tmp)
		neigh_node_free_ref(neigh_node_tmp);
}


void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
		   struct neigh_node *neigh_node, unsigned char *hna_buff,
		   int hna_buff_len)
{

	if (!orig_node)
		return;

	if (orig_node->router != neigh_node)
		update_route(bat_priv, orig_node, neigh_node,
			     hna_buff, hna_buff_len);
	/* may be just HNA changed */
	else
		update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
}

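/* perform the bidirectional link check: derive the local transmit
 * quality and the asymmetry penalty from the packet counts, scale
 * batman_packet->tq by them and return 1 if the link may be
 * considered bidirectional */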
static int is_bidirectional_neigh(struct orig_node *orig_node,
				struct orig_node *orig_neigh_node,
				struct batman_packet *batman_packet,
				struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
	struct hlist_node *node;
	unsigned char total_count;
	uint8_t orig_eq_count, neigh_rq_count, tq_own;
	int tq_asym_penalty, ret = 0;

	if (orig_node == orig_neigh_node) {
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_node->neigh_list, list) {

			if (!compare_eth(tmp_neigh_node->addr,
					 orig_neigh_node->orig))
				continue;

			if (tmp_neigh_node->if_incoming != if_incoming)
				continue;

			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
				continue;

			neigh_node = tmp_neigh_node;
		}
		rcu_read_unlock();

		if (!neigh_node)
			neigh_node = create_neighbor(orig_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		if (!neigh_node)
			goto out;

		neigh_node->last_valid = jiffies;
	} else {
		/* find packet count of corresponding one hop neighbor */
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_neigh_node->neigh_list, list) {

			if (!compare_eth(tmp_neigh_node->addr,
					 orig_neigh_node->orig))
				continue;

			if (tmp_neigh_node->if_incoming != if_incoming)
				continue;

			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
				continue;

			neigh_node = tmp_neigh_node;
		}
		rcu_read_unlock();

		if (!neigh_node)
			neigh_node = create_neighbor(orig_neigh_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		if (!neigh_node)
			goto out;
	}

	orig_node->last_valid = jiffies;

	spin_lock_bh(&orig_node->ogm_cnt_lock);
	orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
	neigh_rq_count = neigh_node->real_packet_count;
	spin_unlock_bh(&orig_node->ogm_cnt_lock);

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_eq_count > neigh_rq_count ?
		       neigh_rq_count : orig_eq_count);

	/* if we have too few packets (too little data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;

	/*
	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE. This affects the
	 * nearly-symmetric links only a little, but punishes asymmetric
	 * links more. The result is a value between 0 and TQ_MAX_VALUE.
	 */
	tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
				(TQ_LOCAL_WINDOW_SIZE *
				 TQ_LOCAL_WINDOW_SIZE *
				 TQ_LOCAL_WINDOW_SIZE);

	batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
						(TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: "
		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
		"total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	return ret;
}

/* caller must hold the neigh_list_lock */
void bonding_candidate_del(struct orig_node *orig_node,
			   struct neigh_node *neigh_node)
{
	/* this neighbor is not part of our candidate list */
	if (list_empty(&neigh_node->bonding_list))
		goto out;

	list_del_rcu(&neigh_node->bonding_list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);
	neigh_node_free_ref(neigh_node);
	atomic_dec(&orig_node->bond_candidates);

out:
	return;
}

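/* add neigh_node to the bonding candidate list of orig_node if it
 * shares the primary address, is within BONDING_TQ_THRESHOLD of the
 * currently selected router and does not interfere with another
 * candidate (same interface or mac address) */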
static void bonding_candidate_add(struct orig_node *orig_node,
				  struct neigh_node *neigh_node)
{
	struct hlist_node *node;
	struct neigh_node *tmp_neigh_node;
	uint8_t best_tq, interference_candidate = 0;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* only consider if it has the same primary address ... */
	if (!compare_eth(orig_node->orig,
			 neigh_node->orig_node->primary_addr))
		goto candidate_del;

	if (!orig_node->router)
		goto candidate_del;

	best_tq = orig_node->router->tq_avg;

	/* ... and is good enough to be considered */
	if (neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
		goto candidate_del;

	/**
	 * check if we have another candidate with the same mac address or
	 * interface. If we do, we won't select this candidate because of
	 * possible interference.
	 */
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		if (tmp_neigh_node == neigh_node)
			continue;

		/* we only care if the other candidate is even
		 * considered as candidate. */
		if (list_empty(&tmp_neigh_node->bonding_list))
			continue;

		if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
		    (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
			interference_candidate = 1;
			break;
		}
	}

	/* don't care further if it is an interference candidate */
	if (interference_candidate)
		goto candidate_del;

	/* this neighbor already is part of our candidate list */
	if (!list_empty(&neigh_node->bonding_list))
		goto out;

	if (!atomic_inc_not_zero(&neigh_node->refcount))
		goto out;

	list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
	atomic_inc(&orig_node->bond_candidates);
	goto out;

candidate_del:
	bonding_candidate_del(orig_node, neigh_node);

out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return;
}

/* copy primary address for bonding */
static void bonding_save_primary(struct orig_node *orig_node,
				 struct orig_node *orig_neigh_node,
				 struct batman_packet *batman_packet)
{
	if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
		return;

	memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
}

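/* update the originator entry with the data of a received OGM:
 * refresh the neighbor ring buffers and TQ averages, the bonding
 * candidates, the route towards the originator and its gateway flags */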
static void update_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node,
			struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			struct hard_iface *if_incoming,
			unsigned char *hna_buff, int hna_buff_len,
			char is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct orig_node *orig_node_tmp;
	struct hlist_node *node;
	int tmp_hna_buff_len;
	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;

	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
		"Searching and updating originator entry of received packet\n");

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming) &&
		     atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
			if (neigh_node)
				neigh_node_free_ref(neigh_node);
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = create_neighbor(orig_node, orig_tmp,
					     ethhdr->h_source, if_incoming);

		orig_node_free_ref(orig_tmp);
		if (!neigh_node)
			goto unlock;
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	rcu_read_unlock();

	orig_node->flags = batman_packet->flags;
	neigh_node->last_valid = jiffies;

	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_packet->ttl;
		neigh_node->last_ttl = batman_packet->ttl;
	}

	bonding_candidate_add(orig_node, neigh_node);

	tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
			    batman_packet->num_hna * ETH_ALEN : hna_buff_len);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	if (orig_node->router == neigh_node)
		goto update_hna;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if ((orig_node->router) &&
	    (orig_node->router->tq_avg > neigh_node->tq_avg))
		goto update_hna;

	/* if the TQ is the same and the link is not more symmetric we
	 * won't consider it either */
	if ((orig_node->router) &&
	    (neigh_node->tq_avg == orig_node->router->tq_avg)) {
		orig_node_tmp = orig_node->router->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_orig =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		orig_node_tmp = neigh_node->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_neigh =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
			goto update_hna;
	}

	update_routes(bat_priv, orig_node, neigh_node,
		      hna_buff, tmp_hna_buff_len);
	goto update_gw;

update_hna:
	update_routes(bat_priv, orig_node, orig_node->router,
		      hna_buff, tmp_hna_buff_len);

update_gw:
	if (orig_node->gw_flags != batman_packet->gw_flags)
		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);

	orig_node->gw_flags = batman_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
}

/* checks whether the host restarted and is in the protection time.
 * returns:
 *  0 if the packet is to be accepted
 *  1 if the packet is to be ignored.
 */
static int window_protected(struct bat_priv *bat_priv,
			    int32_t seq_num_diff,
			    unsigned long *last_reset)
{
	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
		|| (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
		if (time_after(jiffies, *last_reset +
			msecs_to_jiffies(RESET_PROTECTION_MS))) {

			*last_reset = jiffies;
			bat_dbg(DBG_BATMAN, bat_priv,
				"old packet received, start protection\n");

			return 0;
		} else
			return 1;
	}
	return 0;
}

/* processes a batman packet for all interfaces, adjusts the sequence number and
 * finds out whether it is a duplicate.
 * returns:
 *   1 the packet is a duplicate
 *   0 the packet has not yet been received
 *  -1 the packet is old and has been received while the seqno window
 *     was protected. Caller should drop it.
 */
static char count_real_packets(struct ethhdr *ethhdr,
			       struct batman_packet *batman_packet,
			       struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	struct hlist_node *node;
	char is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark, ret = -1;

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return 0;

	spin_lock_bh(&orig_node->ogm_cnt_lock);
	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;

	/* signal the caller that the packet is to be dropped. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
					       orig_node->last_real_seqno,
					       batman_packet->seqno);

		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);

		tmp_neigh_node->real_packet_count =
			bit_packet_count(tmp_neigh_node->real_bits);
	}
	rcu_read_unlock();

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %d, new %d\n",
			orig_node->last_real_seqno, batman_packet->seqno);
		orig_node->last_real_seqno = batman_packet->seqno;
	}

	ret = is_duplicate;

out:
	spin_unlock_bh(&orig_node->ogm_cnt_lock);
	orig_node_free_ref(orig_node);
	return ret;
}

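/* handle a single OGM (possibly taken out of an aggregate): run the
 * sanity and drop checks, update the originator entry and decide
 * whether the packet gets rebroadcast via schedule_forward_packet() */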
void receive_bat_packet(struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			unsigned char *hna_buff, int hna_buff_len,
			struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct hard_iface *hard_iface;
	struct orig_node *orig_neigh_node, *orig_node;
	char has_directlink_flag;
	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
	char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
	char is_duplicate;
	uint32_t if_incoming_seqno;

	/* Silently drop when the batman packet is actually not a
	 * valid packet.
	 *
	 * This might happen if a packet is padded (e.g. Ethernet has a
	 * minimum frame length of 64 bytes) and the aggregation interprets
	 * the padding as an additional packet.
	 *
	 * TODO: A more sane solution would be to have a bit in the
	 * batman_packet to detect whether the packet is the last
	 * packet in an aggregation. Here we expect that the padding
	 * is always zero (or not 0x01)
	 */
	if (batman_packet->packet_type != BAT_PACKET)
		return;

	/* could be changed by schedule_own_packet() */
	if_incoming_seqno = atomic_read(&if_incoming->seqno);

	has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	is_single_hop_neigh = (compare_eth(ethhdr->h_source,
					   batman_packet->orig) ? 1 : 0);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Received BATMAN packet via NB: %pM, IF: %s [%pM] "
		"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
		"TTL %d, V %d, IDF %d)\n",
		ethhdr->h_source, if_incoming->net_dev->name,
		if_incoming->net_dev->dev_addr, batman_packet->orig,
		batman_packet->prev_sender, batman_packet->seqno,
		batman_packet->tq, batman_packet->ttl, batman_packet->version,
		has_directlink_flag);

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->if_status != IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != if_incoming->soft_iface)
			continue;

		if (compare_eth(ethhdr->h_source,
				hard_iface->net_dev->dev_addr))
			is_my_addr = 1;

		if (compare_eth(batman_packet->orig,
				hard_iface->net_dev->dev_addr))
			is_my_orig = 1;

		if (compare_eth(batman_packet->prev_sender,
				hard_iface->net_dev->dev_addr))
			is_my_oldorig = 1;

		if (compare_eth(ethhdr->h_source, broadcast_addr))
			is_broadcast = 1;
	}
	rcu_read_unlock();

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		return;
	}

	if (is_my_addr) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: received my own broadcast (sender: %pM"
			")\n",
			ethhdr->h_source);
		return;
	}

	if (is_broadcast) {
		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"ignoring all packets with broadcast source addr (sender: %pM"
			")\n", ethhdr->h_source);
		return;
	}

	if (is_my_orig) {
		unsigned long *word;
		int offset;

		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_neigh_node)
			return;

		/* neighbor has to indicate direct link and it has to
		 * come via the corresponding interface */
		/* if received seqno equals last send seqno save new
		 * seqno for bidirectional check */
		if (has_directlink_flag &&
		    compare_eth(if_incoming->net_dev->dev_addr,
				batman_packet->orig) &&
		    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
			offset = if_incoming->if_num * NUM_WORDS;

			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
			word = &(orig_neigh_node->bcast_own[offset]);
			bit_mark(word, 0);
			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
				bit_packet_count(word);
			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
		}

		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"originator packet from myself (via neighbor)\n");
		orig_node_free_ref(orig_neigh_node);
		return;
	}

	if (is_my_oldorig) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast echos (sender: "
			"%pM)\n", ethhdr->h_source);
		return;
	}

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return;

	is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);

	if (is_duplicate == -1) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: packet within seqno protection time "
			"(sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	if (batman_packet->tq == 0) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet with tq equal 0\n");
		goto out;
	}

	/* avoid temporary routing loops */
	if ((orig_node->router) &&
	    (orig_node->router->orig_node->router) &&
	    (compare_eth(orig_node->router->addr,
			 batman_packet->prev_sender)) &&
	    !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
	    (compare_eth(orig_node->router->addr,
			 orig_node->router->orig_node->router->addr))) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast packets that "
			"may make me loop (sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	/* if sender is a direct neighbor the sender mac equals
	 * originator mac */
	orig_neigh_node = (is_single_hop_neigh ?
			   orig_node :
			   get_orig_node(bat_priv, ethhdr->h_source));
	if (!orig_neigh_node)
		goto out;

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it */
	if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: OGM via unknown neighbor!\n");
		goto out_neigh;
	}

	is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
						  batman_packet, if_incoming);

	bonding_save_primary(orig_node, orig_neigh_node, batman_packet);

	/* update ranking if it is not a duplicate or has the same
	 * seqno and similar ttl as the non-duplicate */
	if (is_bidirectional &&
	    (!is_duplicate ||
	     ((orig_node->last_real_seqno == batman_packet->seqno) &&
	      (orig_node->last_ttl - 3 <= batman_packet->ttl))))
		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
			    if_incoming, hna_buff, hna_buff_len, is_duplicate);

	/* is single hop (direct) neighbor */
	if (is_single_hop_neigh) {

		/* mark direct link on incoming interface */
		schedule_forward_packet(orig_node, ethhdr, batman_packet,
					1, hna_buff_len, if_incoming);

		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
			"rebroadcast neighbor packet with direct link flag\n");
		goto out_neigh;
	}

	/* multihop originator */
	if (!is_bidirectional) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: not received via bidirectional link\n");
		goto out_neigh;
	}

	if (is_duplicate) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: duplicate packet received\n");
		goto out_neigh;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: rebroadcast originator packet\n");
	schedule_forward_packet(orig_node, ethhdr, batman_packet,
				0, hna_buff_len, if_incoming);

out_neigh:
	if ((orig_neigh_node) && (!is_single_hop_neigh))
		orig_node_free_ref(orig_neigh_node);
out:
	orig_node_free_ref(orig_node);
}

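/* receive handler for batman (OGM) packets: sanity check the skb and
 * hand its payload to the aggregation layer for further processing */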
int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
{
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, 0) < 0)
		return NET_RX_DROP;

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	receive_aggr_bat_packet(ethhdr,
				skb->data,
				skb_headlen(skb),
				hard_iface);

	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

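/* handle an icmp packet addressed to this host: pass everything but
 * echo requests to the userspace socket, answer echo requests with an
 * ECHO_REPLY sent back towards the originator */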
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct icmp_packet_rr *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		goto out;
	}

	if (!bat_priv->primary_if)
		goto out;

	/* answer echo request (ping) */
	/* get routing information */
	rcu_read_lock();
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);

	if (!orig_node)
		goto unlock;

	neigh_node = orig_node->router;

	if (!neigh_node)
		goto unlock;

	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
		neigh_node = NULL;
		goto unlock;
	}

	rcu_read_unlock();

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig,
	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = ECHO_REPLY;
	icmp_packet->ttl = TTL;

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;
	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

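/* an icmp packet ran out of hops on this host: notify the sender with
 * a TTL_EXCEEDED message (only done for echo requests / traceroute) */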
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct icmp_packet *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to "
			 "%pM: ttl exceeded\n", icmp_packet->orig,
			 icmp_packet->dst);
		goto out;
	}

	if (!bat_priv->primary_if)
		goto out;

	/* get routing information */
	rcu_read_lock();
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);

	if (!orig_node)
		goto unlock;

	neigh_node = orig_node->router;

	if (!neigh_node)
		goto unlock;

	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
		neigh_node = NULL;
		goto unlock;
	}

	rcu_read_unlock();

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig,
	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = TTL_EXCEEDED;
	icmp_packet->ttl = TTL;

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;
	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}


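/* receive handler for icmp packets: validates the frame, appends
 * record route information and either processes the packet locally or
 * forwards it towards its destination */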
int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct icmp_packet_rr *icmp_packet;
	struct ethhdr *ethhdr;
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	int hdr_size = sizeof(struct icmp_packet);
	int ret = NET_RX_DROP;

	/**
	 * we truncate all incoming icmp packets if they don't match our size
	 */
	if (skb->len >= sizeof(struct icmp_packet_rr))
		hdr_size = sizeof(struct icmp_packet_rr);

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add record route information if not full */
	if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
	    (icmp_packet->rr_cur < BAT_RR_LEN)) {
		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
			ethhdr->h_dest, ETH_ALEN);
		icmp_packet->rr_cur++;
	}

	/* packet for me */
	if (is_my_mac(icmp_packet->dst))
		return recv_my_icmp_packet(bat_priv, skb, hdr_size);

	/* TTL exceeded */
	if (icmp_packet->ttl < 2)
		return recv_icmp_ttl_exceeded(bat_priv, skb);

	/* get routing information */
	rcu_read_lock();
	orig_node = orig_hash_find(bat_priv, icmp_packet->dst);

	if (!orig_node)
		goto unlock;

	neigh_node = orig_node->router;

	if (!neigh_node)
		goto unlock;

	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
		neigh_node = NULL;
		goto unlock;
	}

	rcu_read_unlock();

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* decrement ttl */
	icmp_packet->ttl--;

	/* route it */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;
	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

/* find a suitable router for this originator, and use
 * bonding if possible. increases the found neighbor's
 * refcount. */
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct hard_iface *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router, *first_candidate, *tmp_neigh_node;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	if (!orig_node->router)
		return NULL;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	rcu_read_lock();
	/* select default router to output */
	router = orig_node->router;
	router_orig = orig_node->router->orig_node;
	if (!router_orig || !atomic_inc_not_zero(&router->refcount)) {
		rcu_read_unlock();
		return NULL;
	}

	if ((!recv_if) && (!bonding_enabled))
		goto return_router;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (compare_eth(router_orig->primary_addr, zero_mac))
		goto return_router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */

	if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
		primary_orig_node = router_orig;
	} else {
		primary_orig_node = orig_hash_find(bat_priv,
						   router_orig->primary_addr);
		if (!primary_orig_node)
			goto return_router;

		orig_node_free_ref(primary_orig_node);
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (atomic_read(&primary_orig_node->bond_candidates) < 2)
		goto return_router;


	/* all nodes between should choose a candidate which
	 * is not on the interface where the packet came
	 * in. */

	neigh_node_free_ref(router);
	first_candidate = NULL;
	router = NULL;

	if (bonding_enabled) {
		/* in the bonding case, send the packets in a round
		 * robin fashion over the remaining interfaces. */

		list_for_each_entry_rcu(tmp_neigh_node,
				&primary_orig_node->bond_list, bonding_list) {
			if (!first_candidate)
				first_candidate = tmp_neigh_node;
			/* recv_if == NULL on the first node. */
			if (tmp_neigh_node->if_incoming != recv_if &&
			    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
				router = tmp_neigh_node;
				break;
			}
		}

		/* use the first candidate if nothing was found. */
		if (!router && first_candidate &&
		    atomic_inc_not_zero(&first_candidate->refcount))
			router = first_candidate;

		if (!router) {
			rcu_read_unlock();
			return NULL;
		}

		/* selected should point to the next element
		 * after the current router */
		spin_lock_bh(&primary_orig_node->neigh_list_lock);
		/* this is a list_move(), which unfortunately
		 * does not exist as rcu version */
		list_del_rcu(&primary_orig_node->bond_list);
		list_add_rcu(&primary_orig_node->bond_list,
			     &router->bonding_list);
		spin_unlock_bh(&primary_orig_node->neigh_list_lock);

	} else {
		/* if bonding is disabled, use the best of the
		 * remaining candidates which are not using
		 * this interface. */
		list_for_each_entry_rcu(tmp_neigh_node,
			&primary_orig_node->bond_list, bonding_list) {
			if (!first_candidate)
				first_candidate = tmp_neigh_node;

			/* recv_if == NULL on the first node. */
			if (tmp_neigh_node->if_incoming == recv_if)
				continue;

			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
				continue;

			/* if we don't have a router yet
			 * or this one is better, choose it. */
			if ((!router) ||
			    (tmp_neigh_node->tq_avg > router->tq_avg)) {
				/* decrement refcount of
				 * previously selected router */
				if (router)
					neigh_node_free_ref(router);

				router = tmp_neigh_node;
				atomic_inc_not_zero(&router->refcount);
			}

			neigh_node_free_ref(tmp_neigh_node);
		}

		/* use the first candidate if nothing was found. */
		if (!router && first_candidate &&
		    atomic_inc_not_zero(&first_candidate->refcount))
			router = first_candidate;
	}
return_router:
	rcu_read_unlock();
	return router;
}

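/* common sanity checks for received unicast packets: minimum length,
 * sane ethernet addresses and destined for this host.
 * returns 0 if the packet may be processed, -1 otherwise */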
static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return -1;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		return -1;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return -1;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return -1;

	return 0;
}

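/* forward a unicast packet towards its destination: look up the
 * originator, select the next hop via find_router(), handle
 * fragmentation or reassembly if needed and decrement the TTL */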
int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret = NET_RX_DROP;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to "
			 "%pM: ttl exceeded\n", ethhdr->h_source,
			 unicast_packet->dest);
		goto out;
	}

	/* get routing information */
	rcu_read_lock();
	orig_node = orig_hash_find(bat_priv, unicast_packet->dest);

	if (!orig_node)
		goto unlock;

	rcu_read_unlock();

	/* find_router() increases neigh_node's refcount if found. */
Marek Lindner44524fc2011-02-10 14:33:53 +00001296 neigh_node = find_router(bat_priv, orig_node, recv_if);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001297
Marek Lindnerd0072602011-01-19 20:01:44 +00001298 if (!neigh_node)
Marek Lindner44524fc2011-02-10 14:33:53 +00001299 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001300
1301 /* create a copy of the skb, if needed, to modify it. */
1302 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
Marek Lindner44524fc2011-02-10 14:33:53 +00001303 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001304
1305 unicast_packet = (struct unicast_packet *)skb->data;
1306
1307 if (unicast_packet->packet_type == BAT_UNICAST &&
1308 atomic_read(&bat_priv->fragmentation) &&
Marek Lindnerd0072602011-01-19 20:01:44 +00001309 skb->len > neigh_node->if_incoming->net_dev->mtu) {
1310 ret = frag_send_skb(skb, bat_priv,
1311 neigh_node->if_incoming, neigh_node->addr);
1312 goto out;
1313 }
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001314
	if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			goto out;

		/* packet was buffered for late merge */
		if (!new_skb) {
			ret = NET_RX_SUCCESS;
			goto out;
		}

		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->ttl--;

	/* route it */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;
	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

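/* handle an incoming unicast packet: deliver it to the soft interface if we
 * are the destination, otherwise route it further through the mesh */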
int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct unicast_packet *unicast_packet;
	int hdr_size = sizeof(struct unicast_packet);

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {
		interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}

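/* handle an incoming unicast fragment: if we are the destination, merge it
 * with its counterpart (or buffer it until the counterpart arrives) and hand
 * the reassembled packet up, otherwise route the fragment further */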
int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_frag_packet *unicast_packet;
	int hdr_size = sizeof(struct unicast_frag_packet);
	struct sk_buff *new_skb = NULL;
	int ret;

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	unicast_packet = (struct unicast_frag_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		interface_rx(recv_if->soft_iface, new_skb, recv_if,
			     sizeof(struct unicast_packet));
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}

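/* handle an incoming batman broadcast: sanity check the addresses, drop
 * duplicates and packets outside the sequence number window, then queue the
 * packet for rebroadcast and deliver a copy to the local soft interface */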
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(struct bcast_packet);
	int ret = NET_RX_DROP;
	int32_t seq_diff;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		goto out;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		goto out;

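	/* drop the broadcast once its ttl is exhausted */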
	if (bcast_packet->ttl < 2)
		goto out;

	rcu_read_lock();
	orig_node = orig_hash_find(bat_priv, bcast_packet->orig);

	if (!orig_node)
		goto rcu_unlock;

	rcu_read_unlock();

	spin_lock_bh(&orig_node->bcast_seqno_lock);

	/* check whether the packet is a duplicate */
	if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
			   ntohl(bcast_packet->seqno)))
		goto spin_unlock;

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset))
		goto spin_unlock;

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&orig_node->bcast_seqno_lock);

	/* rebroadcast packet */
	add_bcast_packet_to_list(bat_priv, skb);

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
	ret = NET_RX_SUCCESS;
	goto out;

rcu_unlock:
	rcu_read_unlock();
	goto out;
spin_unlock:
	spin_unlock_bh(&orig_node->bcast_seqno_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

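/* handle an incoming vis (visualization) packet addressed to this node and
 * pass it to the corresponding vis handler */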
int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct vis_packet *vis_packet;
	struct ethhdr *ethhdr;
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	int hdr_size = sizeof(struct vis_packet);

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	vis_packet = (struct vis_packet *)skb->data;
	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return NET_RX_DROP;

	/* ignore own packets */
	if (is_my_mac(vis_packet->vis_orig))
		return NET_RX_DROP;

	if (is_my_mac(vis_packet->sender_orig))
		return NET_RX_DROP;

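	/* hand the payload to the vis subsystem according to the packet type */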
	switch (vis_packet->vis_type) {
	case VIS_TYPE_SERVER_SYNC:
		receive_server_sync_packet(bat_priv, vis_packet,
					   skb_headlen(skb));
		break;

	case VIS_TYPE_CLIENT_UPDATE:
		receive_client_update_packet(bat_priv, vis_packet,
					     skb_headlen(skb));
		break;

	default:	/* ignore unknown packet */
		break;
	}

	/* We take a copy of the data in the packet, so we should
	   always free the skbuff. */
	return NET_RX_DROP;
}