/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
 *
 * Andreas Langer
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "unicast.h"
#include "send.h"
#include "soft-interface.h"
#include "gateway_client.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "hard-interface.h"

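/**
 * batadv_frag_merge_packet - merge a fragment with its buffered counterpart
 * @head: fragment buffer list of the originator
 * @tfp: buffer entry holding the previously received counterpart fragment
 * @skb: fragment that has just been received
 *
 * Appends the tail payload to the head fragment and rewrites the header into
 * a regular unicast packet.
 *
 * Returns the reassembled skb on success or NULL on failure. On failure the
 * buffered skb is freed, while the caller stays responsible for @skb.
 */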
static struct sk_buff *
batadv_frag_merge_packet(struct list_head *head,
			 struct batadv_frag_packet_list_entry *tfp,
			 struct sk_buff *skb)
{
	struct batadv_unicast_frag_packet *up;
	struct sk_buff *tmp_skb;
	struct batadv_unicast_packet *unicast_packet;
	int hdr_len = sizeof(*unicast_packet);
	int uni_diff = sizeof(*up) - hdr_len;
	uint8_t *packet_pos;

	up = (struct batadv_unicast_frag_packet *)skb->data;
	/* set skb to the first part and tmp_skb to the second part */
	if (up->flags & BATADV_UNI_FRAG_HEAD) {
		tmp_skb = tfp->skb;
	} else {
		tmp_skb = skb;
		skb = tfp->skb;
	}

	if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0)
		goto err;

	skb_pull(tmp_skb, sizeof(*up));
	if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0)
		goto err;

	/* move free entry to end */
	tfp->skb = NULL;
	tfp->seqno = 0;
	list_move_tail(&tfp->list, head);

	memcpy(skb_put(skb, tmp_skb->len), tmp_skb->data, tmp_skb->len);
	kfree_skb(tmp_skb);

	memmove(skb->data + uni_diff, skb->data, hdr_len);
	packet_pos = skb_pull(skb, uni_diff);
	unicast_packet = (struct batadv_unicast_packet *)packet_pos;
	unicast_packet->header.packet_type = BATADV_UNICAST;

	return skb;

err:
	/* free the buffered skb and clear the entry so the buffer does not
	 * keep a stale pointer; the skb passed in is freed by the caller
	 */
	kfree_skb(tfp->skb);
	tfp->skb = NULL;
	return NULL;
}

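/**
 * batadv_frag_create_entry - buffer a fragment until its counterpart arrives
 * @head: fragment buffer list of the originator
 * @skb: fragment to buffer
 *
 * Recycles the oldest entry (kept at the tail of the list), stores the
 * fragment together with its sequence number and moves the entry to the
 * front of the list.
 */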
static void batadv_frag_create_entry(struct list_head *head,
				     struct sk_buff *skb)
{
	struct batadv_frag_packet_list_entry *tfp;
	struct batadv_unicast_frag_packet *up;

	up = (struct batadv_unicast_frag_packet *)skb->data;

	/* free entries and the oldest packets stand at the end */
	tfp = list_entry((head)->prev, typeof(*tfp), list);
	kfree_skb(tfp->skb);

	tfp->seqno = ntohs(up->seqno);
	tfp->skb = skb;
	list_move(&tfp->list, head);
}

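/**
 * batadv_frag_create_buffer - allocate the fragment buffer of an originator
 * @head: empty list head to be filled with BATADV_FRAG_BUFFER_SIZE entries
 *
 * Returns 0 on success or -ENOMEM if an allocation fails. Entries that were
 * already allocated are freed again in the error case.
 */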
static int batadv_frag_create_buffer(struct list_head *head)
{
	int i;
	struct batadv_frag_packet_list_entry *tfp;

	for (i = 0; i < BATADV_FRAG_BUFFER_SIZE; i++) {
		tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC);
		if (!tfp) {
			batadv_frag_list_free(head);
			return -ENOMEM;
		}
		tfp->skb = NULL;
		tfp->seqno = 0;
		INIT_LIST_HEAD(&tfp->list);
		list_add(&tfp->list, head);
	}

	return 0;
}

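/**
 * batadv_frag_search_packet - search the buffer for a matching counterpart
 * @head: fragment buffer list of the originator
 * @up: unicast fragment packet that has just been received
 *
 * Looks for a buffered fragment with the neighbouring sequence number and the
 * opposite head/tail flag. Returns the matching buffer entry or NULL if no
 * counterpart has been buffered yet; duplicates are moved to the tail of the
 * list so they are recycled first.
 */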
static struct batadv_frag_packet_list_entry *
batadv_frag_search_packet(struct list_head *head,
			  const struct batadv_unicast_frag_packet *up)
{
	struct batadv_frag_packet_list_entry *tfp;
	struct batadv_unicast_frag_packet *tmp_up = NULL;
	bool is_head_tmp, is_head;
	uint16_t search_seqno;

	if (up->flags & BATADV_UNI_FRAG_HEAD)
		search_seqno = ntohs(up->seqno) + 1;
	else
		search_seqno = ntohs(up->seqno) - 1;

	is_head = up->flags & BATADV_UNI_FRAG_HEAD;

	list_for_each_entry(tfp, head, list) {
		if (!tfp->skb)
			continue;

		if (tfp->seqno == ntohs(up->seqno))
			goto mov_tail;

		tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;

		if (tfp->seqno == search_seqno) {
			is_head_tmp = tmp_up->flags & BATADV_UNI_FRAG_HEAD;
			if (is_head_tmp != is_head)
				return tfp;
			else
				goto mov_tail;
		}
	}
	return NULL;

mov_tail:
	list_move_tail(&tfp->list, head);
	return NULL;
}

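/**
 * batadv_frag_list_free - free the fragment buffer of an originator
 * @head: list head of the fragment buffer
 *
 * Releases all buffered skbs and the buffer entries themselves.
 */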
void batadv_frag_list_free(struct list_head *head)
{
	struct batadv_frag_packet_list_entry *pf, *tmp_pf;

	if (!list_empty(head)) {
		list_for_each_entry_safe(pf, tmp_pf, head, list) {
			kfree_skb(pf->skb);
			list_del(&pf->list);
			kfree(pf);
		}
	}
}

/**
 * batadv_frag_reassemble_skb - buffer a fragment or reassemble the full packet
 * @skb: fragment that has just been received
 * @bat_priv: the bat priv with all the soft interface information
 * @new_skb: output parameter for the reassembled packet
 *
 * Returns NET_RX_DROP if the operation failed - skb is left intact.
 * Returns NET_RX_SUCCESS if the fragment was buffered (*new_skb will be NULL)
 * or the skb could be reassembled (*new_skb will point to the new packet and
 * skb was freed).
 */
int batadv_frag_reassemble_skb(struct sk_buff *skb,
			       struct batadv_priv *bat_priv,
			       struct sk_buff **new_skb)
{
	struct batadv_orig_node *orig_node;
	struct batadv_frag_packet_list_entry *tmp_frag_entry;
	int ret = NET_RX_DROP;
	struct batadv_unicast_frag_packet *unicast_packet;

	unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
	*new_skb = NULL;

	orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->orig);
	if (!orig_node)
		goto out;

	orig_node->last_frag_packet = jiffies;

	if (list_empty(&orig_node->frag_list) &&
	    batadv_frag_create_buffer(&orig_node->frag_list)) {
		pr_debug("couldn't create frag buffer\n");
		goto out;
	}

	tmp_frag_entry = batadv_frag_search_packet(&orig_node->frag_list,
						   unicast_packet);

	if (!tmp_frag_entry) {
		batadv_frag_create_entry(&orig_node->frag_list, skb);
		ret = NET_RX_SUCCESS;
		goto out;
	}

	*new_skb = batadv_frag_merge_packet(&orig_node->frag_list,
					    tmp_frag_entry, skb);
	/* if not, merge failed */
	if (*new_skb)
		ret = NET_RX_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	return ret;
}

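/**
 * batadv_frag_send_skb - split a unicast packet and send it as two fragments
 * @skb: unicast packet to fragment
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: outgoing interface providing the fragment sequence counter
 * @dstaddr: MAC address of the next hop
 *
 * Splits the payload in half, prepends a unicast fragment header to both
 * halves and transmits them with consecutive sequence numbers.
 *
 * Returns NET_RX_SUCCESS if both fragments were handed to the interface or
 * NET_RX_DROP otherwise (the skbs are freed in that case).
 */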
int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
			 struct batadv_hard_iface *hard_iface,
			 const uint8_t dstaddr[])
{
	struct batadv_unicast_packet tmp_uc, *unicast_packet;
	struct batadv_hard_iface *primary_if;
	struct sk_buff *frag_skb;
	struct batadv_unicast_frag_packet *frag1, *frag2;
	int uc_hdr_len = sizeof(*unicast_packet);
	int ucf_hdr_len = sizeof(*frag1);
	int data_len = skb->len - uc_hdr_len;
	int large_tail = 0, ret = NET_RX_DROP;
	uint16_t seqno;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto dropped;

	frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
	if (!frag_skb)
		goto dropped;
	skb_reserve(frag_skb, ucf_hdr_len);

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
	skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);

	if (batadv_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
	    batadv_skb_head_push(frag_skb, ucf_hdr_len) < 0)
		goto drop_frag;

	frag1 = (struct batadv_unicast_frag_packet *)skb->data;
	frag2 = (struct batadv_unicast_frag_packet *)frag_skb->data;

	memcpy(frag1, &tmp_uc, sizeof(tmp_uc));

	frag1->header.ttl--;
	frag1->header.version = BATADV_COMPAT_VERSION;
	frag1->header.packet_type = BATADV_UNICAST_FRAG;

	memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(frag2, frag1, sizeof(*frag2));

	if (data_len & 1)
		large_tail = BATADV_UNI_FRAG_LARGETAIL;

	frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
	frag2->flags = large_tail;

	seqno = atomic_add_return(2, &hard_iface->frag_seqno);
	frag1->seqno = htons(seqno - 1);
	frag2->seqno = htons(seqno);

	batadv_send_skb_packet(skb, hard_iface, dstaddr);
	batadv_send_skb_packet(frag_skb, hard_iface, dstaddr);
	ret = NET_RX_SUCCESS;
	goto out;

drop_frag:
	kfree_skb(frag_skb);
dropped:
	kfree_skb(skb);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}

/**
 * batadv_unicast_push_and_fill_skb - extends the buffer and initializes the
 *  common fields for unicast packets
 * @skb: packet
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Returns false if the buffer extension was not possible or true otherwise.
 */
static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
					     struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->header.version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->header.packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->header.ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_unicast_prepare_skb - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
				       struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_unicast_push_and_fill_skb(skb, uni_size, orig_node);
}

/**
 * batadv_unicast_4addr_prepare_skb - encapsulate an skb with a unicast4addr
 *  header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the batman 4addr packet subtype to use
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
				      struct sk_buff *skb,
				      struct batadv_orig_node *orig,
				      int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *unicast_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the unicast_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_unicast_push_and_fill_skb(skb,
					      sizeof(*unicast_4addr_packet),
					      orig))
		goto out;

	unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	unicast_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
	memcpy(unicast_4addr_packet->src, primary_if->net_dev->dev_addr,
	       ETH_ALEN);
	unicast_4addr_packet->subtype = packet_subtype;
	unicast_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}

/**
 * batadv_unicast_generic_send_skb - send an skb as unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the batman packet subtype. It is ignored if packet_type is
 *  not BATADV_UNICAST_4ADDR
 *
 * Returns 1 in case of error or 0 otherwise.
 */
int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
				    struct sk_buff *skb, int packet_type,
				    int packet_subtype)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_unicast_packet *unicast_packet;
	struct batadv_orig_node *orig_node;
	struct batadv_neigh_node *neigh_node;
	int data_len = skb->len;
	int ret = NET_RX_DROP;
	unsigned int dev_mtu;

	/* get routing information */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		orig_node = batadv_gw_get_selected_orig(bat_priv);
		if (orig_node)
			goto find_router;
	}

	/* check for tt host - increases orig_node refcount.
	 * returns NULL in case of AP isolation
	 */
	orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
					     ethhdr->h_dest);

find_router:
	/* find_router():
	 * - if orig_node is NULL it returns NULL
	 * - increases neigh_nodes refcount if found.
	 */
	neigh_node = batadv_find_router(bat_priv, orig_node, NULL);

	if (!neigh_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		batadv_unicast_prepare_skb(skb, orig_node);
		break;
	case BATADV_UNICAST_4ADDR:
		batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
						 packet_subtype);
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct
	 * route for this client. The destination will receive this packet and
	 * will try to reroute it because the ttvn contained in the header is
	 * less than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	dev_mtu = neigh_node->if_incoming->net_dev->mtu;
	/* fragmentation mechanism only works for UNICAST (now) */
	if (packet_type == BATADV_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    data_len + sizeof(*unicast_packet) > dev_mtu) {
		/* send frag skb decreases ttl */
		unicast_packet->header.ttl++;
		ret = batadv_frag_send_skb(skb, bat_priv,
					   neigh_node->if_incoming,
					   neigh_node->addr);
		goto out;
	}

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = 0;

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (ret == NET_RX_DROP)
		kfree_skb(skb);
	return ret;
}