/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
 *
 * Martin Hundebøll <martin@hundeboll.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "fragmentation.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include "hard-interface.h"
#include "originator.h"
#include "packet.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"

/**
 * batadv_frag_clear_chain - delete entries in the fragment buffer chain
 * @head: head of chain with entries.
 *
 * Free fragments in the passed hlist. Should be called with appropriate lock.
 */
static void batadv_frag_clear_chain(struct hlist_head *head)
{
	struct batadv_frag_list_entry *entry;
	struct hlist_node *node;

	hlist_for_each_entry_safe(entry, node, head, list) {
		hlist_del(&entry->list);
		kfree_skb(entry->skb);
		kfree(entry);
	}
}

/**
 * batadv_frag_purge_orig - free fragments associated with an orig
 * @orig_node: originator to free fragments from
 * @check_cb: optional function to tell if an entry should be purged
 */
void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
			    bool (*check_cb)(struct batadv_frag_table_entry *))
{
	struct batadv_frag_table_entry *chain;
	u8 i;

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		chain = &orig_node->fragments[i];
		spin_lock_bh(&chain->lock);

		if (!check_cb || check_cb(chain)) {
			batadv_frag_clear_chain(&chain->head);
			chain->size = 0;
		}

		spin_unlock_bh(&chain->lock);
	}
}
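
/* A minimal sketch of a @check_cb (illustrative only, not part of this
 * file): purge a chain only when its fragments are older than a timeout.
 * batadv_has_timed_out() is the helper from main.h; BATADV_FRAG_TIMEOUT is
 * assumed to be the fragment timeout in milliseconds.
 *
 *	static bool batadv_frag_check_timeout(struct batadv_frag_table_entry *frags_entry)
 *	{
 *		if (!hlist_empty(&frags_entry->head) &&
 *		    batadv_has_timed_out(frags_entry->timestamp,
 *					 BATADV_FRAG_TIMEOUT))
 *			return true;
 *
 *		return false;
 *	}
 *
 * Called as: batadv_frag_purge_orig(orig_node, batadv_frag_check_timeout);
 */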

/**
 * batadv_frag_size_limit - maximum possible size of packet to be fragmented
 *
 * Return: the maximum size of payload that can be fragmented.
 */
static int batadv_frag_size_limit(void)
{
	int limit = BATADV_FRAG_MAX_FRAG_SIZE;

	limit -= sizeof(struct batadv_frag_packet);
	limit *= BATADV_FRAG_MAX_FRAGMENTS;

	return limit;
}
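
/* Worked example (a sketch; assumes the customary batman-adv constants
 * BATADV_FRAG_MAX_FRAG_SIZE = 1400, BATADV_FRAG_MAX_FRAGMENTS = 16 and a
 * 20 byte struct batadv_frag_packet):
 *
 *	limit = (1400 - 20) * 16 = 22080 bytes of mergeable payload
 */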

/**
 * batadv_frag_init_chain - check and prepare fragment chain for new fragment
 * @chain: chain in fragments table to init
 * @seqno: sequence number of the received fragment
 *
 * Make chain ready for a fragment with sequence number "seqno". Delete
 * existing entries if they have an "old" sequence number.
 *
 * Caller must hold chain->lock.
 *
 * Return: true if chain is empty and caller can just insert the new fragment
 * without searching for the right position.
 */
static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
				   u16 seqno)
{
	lockdep_assert_held(&chain->lock);

	if (chain->seqno == seqno)
		return false;

	if (!hlist_empty(&chain->head))
		batadv_frag_clear_chain(&chain->head);

	chain->size = 0;
	chain->seqno = seqno;

	return true;
}

/**
 * batadv_frag_insert_packet - insert a fragment into a fragment chain
 * @orig_node: originator that the fragment was received from
 * @skb: skb to insert
 * @chain_out: list head to attach complete chains of fragments to
 *
 * Insert a new fragment into the reverse ordered chain in the right table
 * entry. The hash table entry is cleared if "old" fragments exist in it.
 *
 * Return: true if skb is buffered, false on error. If the chain has all the
 * fragments needed to merge the packet, the chain is moved to the passed head
 * to avoid locking the chain in the table.
 */
static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
				      struct sk_buff *skb,
				      struct hlist_head *chain_out)
{
	struct batadv_frag_table_entry *chain;
	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
	struct batadv_frag_list_entry *frag_entry_last = NULL;
	struct batadv_frag_packet *frag_packet;
	u8 bucket;
	u16 seqno, hdr_size = sizeof(struct batadv_frag_packet);
	bool ret = false;

	/* Linearize packet to avoid linearizing 16 packets in a row when doing
	 * the later merge. Non-linear merge should be added to remove this
	 * linearization.
	 */
	if (skb_linearize(skb) < 0)
		goto err;

	frag_packet = (struct batadv_frag_packet *)skb->data;
	seqno = ntohs(frag_packet->seqno);
	bucket = seqno % BATADV_FRAG_BUFFER_COUNT;

	frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
	if (!frag_entry_new)
		goto err;

	frag_entry_new->skb = skb;
	frag_entry_new->no = frag_packet->no;

	/* Select entry in the "chain table" and delete any prior fragments
	 * with another sequence number. batadv_frag_init_chain() returns
	 * true if the list is empty on return.
	 */
	chain = &orig_node->fragments[bucket];
	spin_lock_bh(&chain->lock);
	if (batadv_frag_init_chain(chain, seqno)) {
		hlist_add_head(&frag_entry_new->list, &chain->head);
		chain->size = skb->len - hdr_size;
		chain->timestamp = jiffies;
		chain->total_size = ntohs(frag_packet->total_size);
		ret = true;
		goto out;
	}

	/* Find the position for the new fragment. */
	hlist_for_each_entry(frag_entry_curr, &chain->head, list) {
		/* Drop packet if fragment already exists. */
		if (frag_entry_curr->no == frag_entry_new->no)
			goto err_unlock;

		/* Order fragments from highest to lowest. */
		if (frag_entry_curr->no < frag_entry_new->no) {
			hlist_add_before(&frag_entry_new->list,
					 &frag_entry_curr->list);
			chain->size += skb->len - hdr_size;
			chain->timestamp = jiffies;
			ret = true;
			goto out;
		}

		/* store current entry because it could be the last in list */
		frag_entry_last = frag_entry_curr;
	}

	/* Reached the end of the list, so insert after 'frag_entry_last'. */
	if (likely(frag_entry_last)) {
		hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list);
		chain->size += skb->len - hdr_size;
		chain->timestamp = jiffies;
		ret = true;
	}

out:
	if (chain->size > batadv_frag_size_limit() ||
	    chain->total_size != ntohs(frag_packet->total_size) ||
	    chain->total_size > batadv_frag_size_limit()) {
		/* Clear chain if total size of either the list or the packet
		 * exceeds the maximum size of one merged packet. Don't allow
		 * packets to have different total_size.
		 */
		batadv_frag_clear_chain(&chain->head);
		chain->size = 0;
	} else if (ntohs(frag_packet->total_size) == chain->size) {
		/* All fragments received. Hand over chain to caller. */
		hlist_move_list(&chain->head, chain_out);
		chain->size = 0;
	}

err_unlock:
	spin_unlock_bh(&chain->lock);

err:
	if (!ret) {
		kfree(frag_entry_new);
		kfree_skb(skb);
	}

	return ret;
}
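
/* Illustrative bucket math for the chain table above (a sketch; assumes
 * BATADV_FRAG_BUFFER_COUNT = 8 as in main.h): fragments with seqno 4660
 * land in bucket 4660 % 8 = 4, while seqno 4661 maps to bucket 5, so two
 * packets in flight do not evict each other; a packet whose seqno collides
 * modulo 8 resets the chain via batadv_frag_init_chain() instead.
 */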

/**
 * batadv_frag_merge_packets - merge a chain of fragments
 * @chain: head of chain with fragments
 *
 * Expand the first skb in the chain and copy the content of the remaining
 * skbs into the expanded one. After doing so, clear the chain.
 *
 * Return: the merged skb or NULL on error.
 */
static struct sk_buff *
batadv_frag_merge_packets(struct hlist_head *chain)
{
	struct batadv_frag_packet *packet;
	struct batadv_frag_list_entry *entry;
	struct sk_buff *skb_out = NULL;
	int size, hdr_size = sizeof(struct batadv_frag_packet);

	/* Remove first entry, as this is the destination for the rest of the
	 * fragments.
	 */
	entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
	hlist_del(&entry->list);
	skb_out = entry->skb;
	kfree(entry);

	packet = (struct batadv_frag_packet *)skb_out->data;
	size = ntohs(packet->total_size) + hdr_size;

	/* Make room for the rest of the fragments. */
	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
		kfree_skb(skb_out);
		skb_out = NULL;
		goto free;
	}

	/* Move the existing MAC header to just before the payload. (Override
	 * the fragment header.)
	 */
	skb_pull(skb_out, hdr_size);
	skb_out->ip_summed = CHECKSUM_NONE;
	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
	skb_set_mac_header(skb_out, -ETH_HLEN);
	skb_reset_network_header(skb_out);
	skb_reset_transport_header(skb_out);

	/* Copy the payload of each fragment into the merged skb */
	hlist_for_each_entry(entry, chain, list) {
		size = entry->skb->len - hdr_size;
		memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size,
		       size);
	}

free:
	/* Locking is not needed, because 'chain' is not part of any orig. */
	batadv_frag_clear_chain(chain);
	return skb_out;
}
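
/* Merge walk-through (a sketch): for a chain holding fragments no 2, 1, 0
 * (kept ordered from highest to lowest), skb_out starts as the first entry
 * (no 2, which carries the head of the original packet). Its fragment
 * header is pulled, the MAC header is moved back in front, and the payloads
 * of no 1 and no 0 are appended, so the merged skb ends up holding the
 * original ntohs(total_size) bytes in order.
 */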

/**
 * batadv_frag_skb_buffer - buffer fragment for later merge
 * @skb: skb to buffer
 * @orig_node_src: originator that the skb is received from
 *
 * Add fragment to buffer and merge fragments if possible.
 *
 * There are three possible outcomes: 1) Packet is merged: Return true and
 * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
 * to NULL; 3) Error: Return false and free skb.
 *
 * Return: true when the packet is merged or buffered, false when the skb is
 * not used.
 */
bool batadv_frag_skb_buffer(struct sk_buff **skb,
			    struct batadv_orig_node *orig_node_src)
{
	struct sk_buff *skb_out = NULL;
	struct hlist_head head = HLIST_HEAD_INIT;
	bool ret = false;

	/* Add packet to buffer and table entry if merge is possible. */
	if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
		goto out_err;

	/* Leave if more fragments are needed to merge. */
	if (hlist_empty(&head))
		goto out;

	skb_out = batadv_frag_merge_packets(&head);
	if (!skb_out)
		goto out_err;

out:
	ret = true;
out_err:
	*skb = skb_out;
	return ret;
}
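
/* Caller-side sketch (illustrative; the real caller is the fragment receive
 * path in routing.c):
 *
 *	if (!batadv_frag_skb_buffer(&skb, orig_node_src))
 *		goto out;		(error: skb was already consumed)
 *	if (!skb)
 *		return NET_RX_SUCCESS;	(buffered, more fragments needed)
 *	(otherwise skb now points at the fully merged packet)
 */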

/**
 * batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged
 * @skb: skb to forward
 * @recv_if: interface that the skb is received on
 * @orig_node_src: originator that the skb is received from
 *
 * Look up the next-hop of the fragment's payload and check if the merged
 * packet will exceed the MTU towards the next-hop. If so, the fragment is
 * forwarded without merging it.
 *
 * Return: true if the fragment is consumed/forwarded, false otherwise.
 */
bool batadv_frag_skb_fwd(struct sk_buff *skb,
			 struct batadv_hard_iface *recv_if,
			 struct batadv_orig_node *orig_node_src)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node_dst = NULL;
	struct batadv_neigh_node *neigh_node = NULL;
	struct batadv_frag_packet *packet;
	u16 total_size;
	bool ret = false;

	packet = (struct batadv_frag_packet *)skb->data;
	orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
	if (!orig_node_dst)
		goto out;

	neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
	if (!neigh_node)
		goto out;

	/* Forward the fragment, if the merged packet would be too big to
	 * be assembled.
	 */
	total_size = ntohs(packet->total_size);
	if (total_size > neigh_node->if_incoming->net_dev->mtu) {
		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
				   skb->len + ETH_HLEN);

		packet->ttl--;
		batadv_send_unicast_skb(skb, neigh_node);
		ret = true;
	}

out:
	if (orig_node_dst)
		batadv_orig_node_put(orig_node_dst);
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);
	return ret;
}

/**
 * batadv_frag_create - create a fragment from skb
 * @skb: skb to create fragment from
 * @frag_head: header to use in new fragment
 * @mtu: size of new fragment
 *
 * Split the passed skb into two fragments: A new one with size matching the
 * passed mtu and the old one with the rest. The new skb contains data from
 * the tail of the old skb.
 *
 * Return: the new fragment, NULL on error.
 */
static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
					  struct batadv_frag_packet *frag_head,
					  unsigned int mtu)
{
	struct sk_buff *skb_fragment;
	unsigned int header_size = sizeof(*frag_head);
	unsigned int fragment_size = mtu - header_size;

	skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
	if (!skb_fragment)
		goto err;

	skb_fragment->priority = skb->priority;

	/* Eat the last mtu-bytes of the skb */
	skb_reserve(skb_fragment, header_size + ETH_HLEN);
	skb_split(skb, skb_fragment, skb->len - fragment_size);

	/* Add the header */
	skb_push(skb_fragment, header_size);
	memcpy(skb_fragment->data, frag_head, header_size);

err:
	return skb_fragment;
}
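
/* Worked split (a sketch; assumes mtu = 1400 and a 20 byte fragment
 * header): fragment_size = 1400 - 20 = 1380, so for a 3000 byte skb
 * skb_split() cuts at 3000 - 1380 = 1620. The new fragment carries the
 * 1380 tail bytes plus the copied header, and the passed skb keeps the
 * first 1620 bytes for the next iteration.
 */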

/**
 * batadv_frag_send_packet - create up to 16 fragments from the passed skb
 * @skb: skb to create fragments from
 * @orig_node: final destination of the created fragments
 * @neigh_node: next-hop of the created fragments
 *
 * Return: the netdev tx status or -1 in case of error.
 * When -1 is returned the skb is not consumed.
 */
int batadv_frag_send_packet(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_neigh_node *neigh_node)
{
	struct batadv_priv *bat_priv;
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_frag_packet frag_header;
	struct sk_buff *skb_fragment;
	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
	unsigned int header_size = sizeof(frag_header);
	unsigned int max_fragment_size, max_packet_size;
	int ret = -1;

	/* To avoid merge and refragmentation at next-hops we never send
	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
	 */
	mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
	max_fragment_size = mtu - header_size;
	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;

	/* Don't even try to fragment, if we need more than 16 fragments */
	if (skb->len > max_packet_size)
		goto out;

	bat_priv = orig_node->bat_priv;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Create one header to be copied to all fragments */
	frag_header.packet_type = BATADV_UNICAST_FRAG;
	frag_header.version = BATADV_COMPAT_VERSION;
	frag_header.ttl = BATADV_TTL;
	frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
	frag_header.reserved = 0;
	frag_header.no = 0;
	frag_header.total_size = htons(skb->len);

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority. This is used
	 * to allow 802.1d priority to be passed directly in from VLAN
	 * tags, etc.
	 */
	if (skb->priority >= 256 && skb->priority <= 263)
		frag_header.priority = skb->priority - 256;
	else
		frag_header.priority = 0;
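	/* Example of the mapping above (illustrative): skb->priority = 260
	 * encodes 802.1d priority 260 - 256 = 4, so frag_header.priority
	 * becomes 4; any skb->priority outside 256..263 falls back to 0.
	 */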

	ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
	ether_addr_copy(frag_header.dest, orig_node->orig);

	/* Eat and send fragments from the tail of skb */
	while (skb->len > max_fragment_size) {
		/* The initial check in this function should cover this case */
		if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
			ret = -1;
			goto out;
		}

		skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
		if (!skb_fragment)
			goto out;

		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
				   skb_fragment->len + ETH_HLEN);
		ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
		if (ret != NET_XMIT_SUCCESS) {
			/* return -1 so that the caller can free the original
			 * skb
			 */
			ret = -1;
			goto out;
		}

		frag_header.no++;
	}

	/* Make room for the fragment header. */
	if (batadv_skb_head_push(skb, header_size) < 0 ||
	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0)
		goto out;

	memcpy(skb->data, &frag_header, header_size);

	/* Send the last fragment */
	batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
			   skb->len + ETH_HLEN);
	ret = batadv_send_unicast_skb(skb, neigh_node);

out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	return ret;
}