/* Copyright (C) 2013-2015 B.A.T.M.A.N. contributors:
 *
 * Martin Hundebøll <martin@hundeboll.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "fragmentation.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include "hard-interface.h"
#include "originator.h"
#include "packet.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"

/**
 * batadv_frag_clear_chain - delete entries in the fragment buffer chain
 * @head: head of chain with entries.
 *
 * Free fragments in the passed hlist. Should be called with appropriate lock.
 */
static void batadv_frag_clear_chain(struct hlist_head *head)
{
	struct batadv_frag_list_entry *entry;
	struct hlist_node *node;

	hlist_for_each_entry_safe(entry, node, head, list) {
		hlist_del(&entry->list);
		kfree_skb(entry->skb);
		kfree(entry);
	}
}

/**
 * batadv_frag_purge_orig - free fragments associated with an orig node
 * @orig_node: originator to free fragments from
 * @check_cb: optional function to tell if an entry should be purged
 */
void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
			    bool (*check_cb)(struct batadv_frag_table_entry *))
{
	struct batadv_frag_table_entry *chain;
	uint8_t i;

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		chain = &orig_node->fragments[i];
		spin_lock_bh(&orig_node->fragments[i].lock);

		if (!check_cb || check_cb(chain)) {
			batadv_frag_clear_chain(&orig_node->fragments[i].head);
			orig_node->fragments[i].size = 0;
		}

		spin_unlock_bh(&orig_node->fragments[i].lock);
	}
}

/**
 * batadv_frag_size_limit - maximum possible size of packet to be fragmented
 *
 * Returns the maximum size of payload that can be fragmented.
 */
static int batadv_frag_size_limit(void)
{
	int limit = BATADV_FRAG_MAX_FRAG_SIZE;

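	/* Each fragment loses one fragment header of payload, and at most
	 * BATADV_FRAG_MAX_FRAGMENTS fragments can be combined. Assuming the
	 * usual defaults (1400-byte max fragment size, 16 fragments, 20-byte
	 * fragment header), this works out to (1400 - 20) * 16 = 22080 bytes.
	 */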
	limit -= sizeof(struct batadv_frag_packet);
	limit *= BATADV_FRAG_MAX_FRAGMENTS;

	return limit;
}

/**
 * batadv_frag_init_chain - check and prepare fragment chain for new fragment
 * @chain: chain in fragments table to init
 * @seqno: sequence number of the received fragment
 *
 * Make chain ready for a fragment with sequence number "seqno". Delete existing
 * entries if they have an "old" sequence number.
 *
 * Caller must hold chain->lock.
 *
 * Returns true if chain is empty and caller can just insert the new fragment
 * without searching for the right position.
 */
static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
				   uint16_t seqno)
{
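	/* A matching sequence number means the chain is already collecting
	 * fragments of this packet, so its contents are kept. Any other
	 * sequence number marks the buffered fragments as stale: they are
	 * dropped and the chain is reused for the new packet.
	 */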
	if (chain->seqno == seqno)
		return false;

	if (!hlist_empty(&chain->head))
		batadv_frag_clear_chain(&chain->head);

	chain->size = 0;
	chain->seqno = seqno;

	return true;
}

/**
 * batadv_frag_insert_packet - insert a fragment into a fragment chain
 * @orig_node: originator that the fragment was received from
 * @skb: skb to insert
 * @chain_out: list head to attach complete chains of fragments to
 *
 * Insert a new fragment into the reverse ordered chain in the right table
 * entry. The hash table entry is cleared if "old" fragments exist in it.
 *
 * Returns true if skb is buffered, false on error. If the chain has all the
 * fragments needed to merge the packet, the chain is moved to the passed head
 * to avoid locking the chain in the table.
 */
static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
				      struct sk_buff *skb,
				      struct hlist_head *chain_out)
{
	struct batadv_frag_table_entry *chain;
	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
	struct batadv_frag_list_entry *frag_entry_last = NULL;
	struct batadv_frag_packet *frag_packet;
	uint8_t bucket;
	uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
	bool ret = false;

	/* Linearize packet to avoid linearizing 16 packets in a row when doing
	 * the later merge. Non-linear merge should be added to remove this
	 * linearization.
	 */
	if (skb_linearize(skb) < 0)
		goto err;

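	/* The sequence number from the fragment header selects one of the
	 * per-originator reassembly chains; with BATADV_FRAG_BUFFER_COUNT
	 * buckets (8 at the time of writing), that many packets per
	 * originator can be reassembled concurrently.
	 */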
	frag_packet = (struct batadv_frag_packet *)skb->data;
	seqno = ntohs(frag_packet->seqno);
	bucket = seqno % BATADV_FRAG_BUFFER_COUNT;

	frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
	if (!frag_entry_new)
		goto err;

	frag_entry_new->skb = skb;
	frag_entry_new->no = frag_packet->no;

	/* Select entry in the "chain table" and delete any prior fragments
	 * with another sequence number. batadv_frag_init_chain() returns true
	 * if the list is empty at return.
	 */
	chain = &orig_node->fragments[bucket];
	spin_lock_bh(&chain->lock);
	if (batadv_frag_init_chain(chain, seqno)) {
		hlist_add_head(&frag_entry_new->list, &chain->head);
		chain->size = skb->len - hdr_size;
		chain->timestamp = jiffies;
		chain->total_size = ntohs(frag_packet->total_size);
		ret = true;
		goto out;
	}

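	/* The chain is kept in descending order of fragment number: the
	 * sender emits the tail of the packet as fragment 0 first, so the
	 * fragment with the highest number carries the head of the packet
	 * and must end up at the front of the chain for the merge.
	 */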
	/* Find the position for the new fragment. */
	hlist_for_each_entry(frag_entry_curr, &chain->head, list) {
		/* Drop packet if fragment already exists. */
		if (frag_entry_curr->no == frag_entry_new->no)
			goto err_unlock;

		/* Order fragments from highest to lowest. */
		if (frag_entry_curr->no < frag_entry_new->no) {
			hlist_add_before(&frag_entry_new->list,
					 &frag_entry_curr->list);
			chain->size += skb->len - hdr_size;
			chain->timestamp = jiffies;
			ret = true;
			goto out;
		}

		/* store current entry because it could be the last in list */
		frag_entry_last = frag_entry_curr;
	}

	/* Reached the end of the list, so insert after 'frag_entry_last'. */
	if (likely(frag_entry_last)) {
		hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list);
		chain->size += skb->len - hdr_size;
		chain->timestamp = jiffies;
		ret = true;
	}

out:
	if (chain->size > batadv_frag_size_limit() ||
	    chain->total_size != ntohs(frag_packet->total_size) ||
	    chain->total_size > batadv_frag_size_limit()) {
		/* Clear chain if total size of either the list or the packet
		 * exceeds the maximum size of one merged packet. Don't allow
		 * packets to have different total_size.
		 */
		batadv_frag_clear_chain(&chain->head);
		chain->size = 0;
	} else if (ntohs(frag_packet->total_size) == chain->size) {
		/* All fragments received. Hand over chain to caller. */
		hlist_move_list(&chain->head, chain_out);
		chain->size = 0;
	}

err_unlock:
	spin_unlock_bh(&chain->lock);

err:
	if (!ret)
		kfree(frag_entry_new);

	return ret;
}

/**
 * batadv_frag_merge_packets - merge a chain of fragments
 * @chain: head of chain with fragments
 *
 * Expand the first skb in the chain and copy the content of the remaining
 * skb's into the expanded one. After doing so, clear the chain.
 *
 * Returns the merged skb or NULL on error.
 */
static struct sk_buff *
batadv_frag_merge_packets(struct hlist_head *chain)
{
	struct batadv_frag_packet *packet;
	struct batadv_frag_list_entry *entry;
	struct sk_buff *skb_out = NULL;
	int size, hdr_size = sizeof(struct batadv_frag_packet);

	/* Remove first entry, as this is the destination for the rest of the
	 * fragments.
	 */
	entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
	hlist_del(&entry->list);
	skb_out = entry->skb;
	kfree(entry);

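	/* The first entry carries the highest fragment number, i.e. the head
	 * of the original packet, and its header states the size the merged
	 * packet will have.
	 */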
	packet = (struct batadv_frag_packet *)skb_out->data;
	size = ntohs(packet->total_size);

	/* Make room for the rest of the fragments. */
	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
		kfree_skb(skb_out);
		skb_out = NULL;
		goto free;
	}

	/* Move the existing MAC header to just before the payload
	 * (overwriting the fragment header).
	 */
	skb_pull_rcsum(skb_out, hdr_size);
	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
	skb_set_mac_header(skb_out, -ETH_HLEN);
	skb_reset_network_header(skb_out);
	skb_reset_transport_header(skb_out);

	/* Copy the payload of each fragment into the merged skb */
	hlist_for_each_entry(entry, chain, list) {
		size = entry->skb->len - hdr_size;
		memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size,
		       size);
	}

free:
	/* Locking is not needed, because 'chain' is not part of any orig. */
	batadv_frag_clear_chain(chain);
	return skb_out;
}

/**
 * batadv_frag_skb_buffer - buffer fragment for later merge
 * @skb: pointer to the skb to buffer
 * @orig_node_src: originator that the skb is received from
 *
 * Add fragment to buffer and merge fragments if possible.
 *
 * There are three possible outcomes: 1) Packet is merged: Return true and
 * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
 * to NULL; 3) Error: Return false and leave skb as is.
 */
bool batadv_frag_skb_buffer(struct sk_buff **skb,
			    struct batadv_orig_node *orig_node_src)
{
	struct sk_buff *skb_out = NULL;
	struct hlist_head head = HLIST_HEAD_INIT;
	bool ret = false;

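	/* A completed chain is handed over on the local 'head' list, so the
	 * merge below runs without holding the chain lock in the fragment
	 * table.
	 */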
	/* Add packet to buffer and table entry if merge is possible. */
	if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
		goto out_err;

	/* Leave if more fragments are needed to merge. */
	if (hlist_empty(&head))
		goto out;

	skb_out = batadv_frag_merge_packets(&head);
	if (!skb_out)
		goto out_err;

out:
	*skb = skb_out;
	ret = true;
out_err:
	return ret;
}

/**
 * batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged
 * @skb: skb to forward
 * @recv_if: interface that the skb is received on
 * @orig_node_src: originator that the skb is received from
 *
 * Look up the next-hop of the fragment's payload and check if the merged
 * packet will exceed the MTU towards the next-hop. If so, the fragment is
 * forwarded without merging it.
 *
 * Returns true if the fragment is consumed/forwarded, false otherwise.
 */
bool batadv_frag_skb_fwd(struct sk_buff *skb,
			 struct batadv_hard_iface *recv_if,
			 struct batadv_orig_node *orig_node_src)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node_dst = NULL;
	struct batadv_neigh_node *neigh_node = NULL;
	struct batadv_frag_packet *packet;
	uint16_t total_size;
	bool ret = false;

	packet = (struct batadv_frag_packet *)skb->data;
	orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
	if (!orig_node_dst)
		goto out;

	neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
	if (!neigh_node)
		goto out;

	/* Forward the fragment, if the merged packet would be too big to
	 * be assembled.
	 */
	total_size = ntohs(packet->total_size);
	if (total_size > neigh_node->if_incoming->net_dev->mtu) {
		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
				   skb->len + ETH_HLEN);

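		/* The fragment header carries its own TTL, decremented per
		 * hop like that of any other batman-adv unicast packet.
		 */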
		packet->ttl--;
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = true;
	}

out:
	if (orig_node_dst)
		batadv_orig_node_free_ref(orig_node_dst);
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	return ret;
}

/**
 * batadv_frag_create - create a fragment from skb
 * @skb: skb to create fragment from
 * @frag_head: header to use in new fragment
 * @mtu: size of new fragment
 *
 * Split the passed skb into two fragments: a new one with size matching the
 * passed mtu and the old one with the rest. The new skb contains data from the
 * tail of the old skb.
 *
 * Returns the new fragment, NULL on error.
 */
static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
					  struct batadv_frag_packet *frag_head,
					  unsigned int mtu)
{
	struct sk_buff *skb_fragment;
	unsigned int header_size = sizeof(*frag_head);
	unsigned int fragment_size = mtu - header_size;

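	/* Allocate with room for the fragment header plus an Ethernet header
	 * in front, so neither the skb_push() below nor the sending path has
	 * to reallocate headroom.
	 */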
	skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
	if (!skb_fragment)
		goto err;

	skb->priority = TC_PRIO_CONTROL;

	/* Eat the last mtu-bytes of the skb */
	skb_reserve(skb_fragment, header_size + ETH_HLEN);
	skb_split(skb, skb_fragment, skb->len - fragment_size);

	/* Add the header */
	skb_push(skb_fragment, header_size);
	memcpy(skb_fragment->data, frag_head, header_size);

err:
	return skb_fragment;
}

/**
 * batadv_frag_send_packet - create up to 16 fragments from the passed skb
 * @skb: skb to create fragments from
 * @orig_node: final destination of the created fragments
 * @neigh_node: next-hop of the created fragments
 *
 * Returns true on success, false otherwise.
 */
bool batadv_frag_send_packet(struct sk_buff *skb,
			     struct batadv_orig_node *orig_node,
			     struct batadv_neigh_node *neigh_node)
{
	struct batadv_priv *bat_priv;
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_frag_packet frag_header;
	struct sk_buff *skb_fragment;
	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
	unsigned int header_size = sizeof(frag_header);
	unsigned int max_fragment_size, max_packet_size;
	bool ret = false;

	/* To avoid merge and refragmentation at next-hops we never send
	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE.
	 */
	mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
	max_fragment_size = mtu - header_size;
	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;

	/* Don't even try to fragment if we need more than 16 fragments */
	if (skb->len > max_packet_size)
		goto out_err;

	bat_priv = orig_node->bat_priv;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_err;

	/* Create one header to be copied to all fragments */
	frag_header.packet_type = BATADV_UNICAST_FRAG;
	frag_header.version = BATADV_COMPAT_VERSION;
	frag_header.ttl = BATADV_TTL;
	frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
	frag_header.reserved = 0;
	frag_header.no = 0;
	frag_header.total_size = htons(skb->len);
	ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
	ether_addr_copy(frag_header.dest, orig_node->orig);

	/* Eat and send fragments from the tail of skb */
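	/* Fragment numbers are assigned tail-first: the last bytes of the
	 * skb leave as fragment 0 and the head of the packet is sent last
	 * with the highest number, matching the descending order used by
	 * batadv_frag_insert_packet() on the receiving side.
	 */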
	while (skb->len > max_fragment_size) {
		skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
		if (!skb_fragment)
			goto out_err;

		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
				   skb_fragment->len + ETH_HLEN);
		batadv_send_skb_packet(skb_fragment, neigh_node->if_incoming,
				       neigh_node->addr);
		frag_header.no++;

		/* The initial check in this function should cover this case */
		if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)
			goto out_err;
	}

	/* Make room for the fragment header. */
	if (batadv_skb_head_push(skb, header_size) < 0 ||
	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0)
		goto out_err;

	memcpy(skb->data, &frag_header, header_size);

	/* Send the last fragment */
	batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
			   skb->len + ETH_HLEN);
	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);

	ret = true;

out_err:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);

	return ret;
}