/*
 * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
 *
 * Andreas Langer
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "unicast.h"
#include "send.h"
#include "soft-interface.h"
#include "gateway_client.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "hard-interface.h"

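/* frag_merge_packet():
 * appends the tail fragment's payload to the head fragment, releases the
 * used buffer slot in the fragment list and turns the header back into a
 * regular unicast header
 * returns the merged skb, or NULL if the head skb could not be expanded
 * (the buffered skb is freed, the caller's skb is left to the caller)
 */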
static struct sk_buff *frag_merge_packet(struct list_head *head,
					 struct frag_packet_list_entry *tfp,
					 struct sk_buff *skb)
{
	struct unicast_frag_packet *up =
		(struct unicast_frag_packet *)skb->data;
	struct sk_buff *tmp_skb;
	struct unicast_packet *unicast_packet;
	int hdr_len = sizeof(struct unicast_packet);
	int uni_diff = sizeof(struct unicast_frag_packet) - hdr_len;

	/* set skb to the first part and tmp_skb to the second part */
	if (up->flags & UNI_FRAG_HEAD) {
		tmp_skb = tfp->skb;
	} else {
		tmp_skb = skb;
		skb = tfp->skb;
	}

	skb_pull(tmp_skb, sizeof(struct unicast_frag_packet));
	if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) {
		/* free buffered skb, skb will be freed later */
		kfree_skb(tfp->skb);
		return NULL;
	}

	/* move free entry to end */
	tfp->skb = NULL;
	tfp->seqno = 0;
	list_move_tail(&tfp->list, head);

	memcpy(skb_put(skb, tmp_skb->len), tmp_skb->data, tmp_skb->len);
	kfree_skb(tmp_skb);

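	/* convert the leading fragment header into a plain unicast header:
	 * shift the shared fields towards the payload and pull the surplus
	 * bytes */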
	memmove(skb->data + uni_diff, skb->data, hdr_len);
	unicast_packet = (struct unicast_packet *)skb_pull(skb, uni_diff);
	unicast_packet->packet_type = BAT_UNICAST;

	return skb;
}

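/* frag_create_entry():
 * buffers an unmatched fragment in the last slot of the per-originator
 * fragment list (free or oldest entry), dropping whatever that slot held
 */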
static void frag_create_entry(struct list_head *head, struct sk_buff *skb)
{
	struct frag_packet_list_entry *tfp;
	struct unicast_frag_packet *up =
		(struct unicast_frag_packet *)skb->data;

	/* free slots and the oldest packets are kept at the end */
	tfp = list_entry((head)->prev, typeof(*tfp), list);
	kfree_skb(tfp->skb);

	tfp->seqno = ntohs(up->seqno);
	tfp->skb = skb;
	list_move(&tfp->list, head);
}

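/* frag_create_buffer():
 * allocates the FRAG_BUFFER_SIZE reassembly slots of a fragment list
 * returns 0 on success, -ENOMEM otherwise (the list is freed again)
 */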
static int frag_create_buffer(struct list_head *head)
{
	int i;
	struct frag_packet_list_entry *tfp;

	for (i = 0; i < FRAG_BUFFER_SIZE; i++) {
		tfp = kmalloc(sizeof(struct frag_packet_list_entry),
			      GFP_ATOMIC);
		if (!tfp) {
			frag_list_free(head);
			return -ENOMEM;
		}
		tfp->skb = NULL;
		tfp->seqno = 0;
		INIT_LIST_HEAD(&tfp->list);
		list_add(&tfp->list, head);
	}

	return 0;
}

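/* frag_search_packet():
 * looks for the counterpart of a fragment (neighbouring seqno, opposite
 * UNI_FRAG_HEAD flag) in the fragment list
 * returns the matching entry, or NULL if none was found; duplicates and
 * stale same-role entries are moved to the end of the list
 */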
static struct frag_packet_list_entry *frag_search_packet(struct list_head *head,
					struct unicast_frag_packet *up)
{
	struct frag_packet_list_entry *tfp;
	struct unicast_frag_packet *tmp_up = NULL;
	uint16_t search_seqno;

	if (up->flags & UNI_FRAG_HEAD)
		search_seqno = ntohs(up->seqno) + 1;
	else
		search_seqno = ntohs(up->seqno) - 1;

	list_for_each_entry(tfp, head, list) {

		if (!tfp->skb)
			continue;

		if (tfp->seqno == ntohs(up->seqno))
			goto mov_tail;

		tmp_up = (struct unicast_frag_packet *)tfp->skb->data;

		if (tfp->seqno == search_seqno) {

			if ((tmp_up->flags & UNI_FRAG_HEAD) !=
			    (up->flags & UNI_FRAG_HEAD))
				return tfp;
			else
				goto mov_tail;
		}
	}
	return NULL;

mov_tail:
	list_move_tail(&tfp->list, head);
	return NULL;
}

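/* frag_list_free():
 * releases all buffered fragments and entries of a fragment list
 */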
void frag_list_free(struct list_head *head)
{
	struct frag_packet_list_entry *pf, *tmp_pf;

	if (!list_empty(head)) {

		list_for_each_entry_safe(pf, tmp_pf, head, list) {
			kfree_skb(pf->skb);
			list_del(&pf->list);
			kfree(pf);
		}
	}
}

/* frag_reassemble_skb():
 * returns NET_RX_DROP if the operation failed - skb is left intact
 * returns NET_RX_SUCCESS if the fragment was buffered (*new_skb will be NULL)
 * or the skb could be reassembled (*new_skb will point to the new packet and
 * skb was freed)
 */
int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
			struct sk_buff **new_skb)
{
	struct orig_node *orig_node;
	struct frag_packet_list_entry *tmp_frag_entry;
	int ret = NET_RX_DROP;
	struct unicast_frag_packet *unicast_packet =
		(struct unicast_frag_packet *)skb->data;

	*new_skb = NULL;
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       unicast_packet->orig));
	rcu_read_unlock();

	if (!orig_node) {
		pr_debug("couldn't find originator in orig_hash\n");
		goto out;
	}

	orig_node->last_frag_packet = jiffies;

	if (list_empty(&orig_node->frag_list) &&
	    frag_create_buffer(&orig_node->frag_list)) {
		pr_debug("couldn't create frag buffer\n");
		goto out;
	}

	tmp_frag_entry = frag_search_packet(&orig_node->frag_list,
					    unicast_packet);

	if (!tmp_frag_entry) {
		frag_create_entry(&orig_node->frag_list, skb);
		ret = NET_RX_SUCCESS;
		goto out;
	}

	*new_skb = frag_merge_packet(&orig_node->frag_list, tmp_frag_entry,
				     skb);
	/* if *new_skb is NULL the merge failed */
	if (*new_skb)
		ret = NET_RX_SUCCESS;
out:
	spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}

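/* frag_send_skb():
 * splits a unicast packet into two fragments with consecutive sequence
 * numbers and sends both via the given interface to dstaddr
 * returns NET_RX_SUCCESS on success, NET_RX_DROP otherwise (skb is freed)
 */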
int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
		  struct batman_if *batman_if, uint8_t dstaddr[])
{
	struct unicast_packet tmp_uc, *unicast_packet;
	struct sk_buff *frag_skb;
	struct unicast_frag_packet *frag1, *frag2;
	int uc_hdr_len = sizeof(struct unicast_packet);
	int ucf_hdr_len = sizeof(struct unicast_frag_packet);
	int data_len = skb->len - uc_hdr_len;
	int large_tail = 0;
	uint16_t seqno;

	if (!bat_priv->primary_if)
		goto dropped;

	frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
	if (!frag_skb)
		goto dropped;
	skb_reserve(frag_skb, ucf_hdr_len);

	unicast_packet = (struct unicast_packet *)skb->data;
	memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
	skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);

	if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
	    my_skb_head_push(frag_skb, ucf_hdr_len) < 0)
		goto drop_frag;

	frag1 = (struct unicast_frag_packet *)skb->data;
	frag2 = (struct unicast_frag_packet *)frag_skb->data;

	memcpy(frag1, &tmp_uc, sizeof(struct unicast_packet));

	frag1->ttl--;
	frag1->version = COMPAT_VERSION;
	frag1->packet_type = BAT_UNICAST_FRAG;

	memcpy(frag1->orig, bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(frag2, frag1, sizeof(struct unicast_frag_packet));

	if (data_len & 1)
		large_tail = UNI_FRAG_LARGETAIL;

	frag1->flags = UNI_FRAG_HEAD | large_tail;
	frag2->flags = large_tail;

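	/* reserve two consecutive sequence numbers: seqno - 1 for the head
	 * fragment, seqno for the tail fragment */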
	seqno = atomic_add_return(2, &batman_if->frag_seqno);
	frag1->seqno = htons(seqno - 1);
	frag2->seqno = htons(seqno);

	send_skb_packet(skb, batman_if, dstaddr);
	send_skb_packet(frag_skb, batman_if, dstaddr);
	return NET_RX_SUCCESS;

drop_frag:
	kfree_skb(frag_skb);
dropped:
	kfree_skb(skb);
	return NET_RX_DROP;
}

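/* unicast_send_skb():
 * encapsulates an ethernet frame in a unicast packet and sends it towards
 * the best next hop for its destination, fragmenting it first if it
 * exceeds the outgoing interface's MTU and fragmentation is enabled
 * returns 0 on success, 1 otherwise (skb is freed)
 */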
int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct unicast_packet *unicast_packet;
	struct orig_node *orig_node = NULL;
	struct batman_if *batman_if;
	struct neigh_node *router;
	int data_len = skb->len;
	uint8_t dstaddr[6];

	spin_lock_bh(&bat_priv->orig_hash_lock);

	/* get routing information */
	if (is_multicast_ether_addr(ethhdr->h_dest))
		orig_node = (struct orig_node *)gw_get_selected(bat_priv);

	/* check for hna host */
	if (!orig_node)
		orig_node = transtable_search(bat_priv, ethhdr->h_dest);

	/* find_router() increases the neigh_node's refcount if found. */
	router = find_router(bat_priv, orig_node, NULL);

	if (!router)
		goto unlock;

	/* don't hold the lock while sending the packets ... we therefore
	 * copy the required data before unlocking */
	batman_if = router->if_incoming;
	memcpy(dstaddr, router->addr, ETH_ALEN);

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	if (batman_if->if_status != IF_ACTIVE)
		goto dropped;

	if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0)
		goto dropped;

	unicast_packet = (struct unicast_packet *)skb->data;

	unicast_packet->version = COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BAT_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = TTL;
	/* copy the destination for faster routing */
	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);

	if (atomic_read(&bat_priv->fragmentation) &&
	    data_len + sizeof(struct unicast_packet) >
	    batman_if->net_dev->mtu) {
		/* frag_send_skb() decreases the ttl again */
		unicast_packet->ttl++;
		return frag_send_skb(skb, bat_priv, batman_if,
				     dstaddr);
	}
	send_skb_packet(skb, batman_if, dstaddr);
	return 0;

unlock:
	spin_unlock_bh(&bat_priv->orig_hash_lock);
dropped:
	kfree_skb(skb);
	return 1;
}