/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/if.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
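
/* Usage sketch: batadv_send_skb_packet() consumes the skb and frees it
 * itself on every error path, so a caller that wants to keep the original
 * buffer hands over a clone, as the rebroadcast loop further below does:
 *
 *	skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
 *	if (skb1)
 *		batadv_send_skb_packet(skb1, hard_iface,
 *				       batadv_broadcast_addr);
 */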

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
			ret = NET_XMIT_SUCCESS;

		goto out;
	}

	/* try to network code the packet, if it was received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or
	 * if network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = NET_XMIT_SUCCESS;
	}

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);

	return ret;
}
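
/* Usage sketch: NET_XMIT_DROP signals that the skb was not handed off to
 * a neighbor; batadv_send_skb_unicast() below treats it as the one case
 * where the caller still has to free the skb:
 *
 *	if (batadv_send_skb_to_orig(skb, orig_node, NULL) == NET_XMIT_DROP)
 *		kfree_skb(skb);
 */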

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Returns false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}
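
/* For reference, the header filled in above (struct batadv_unicast_packet,
 * see packet.h) ends up looking roughly like this on the wire:
 *
 *	packet_type = BATADV_UNICAST
 *	version     = BATADV_COMPAT_VERSION
 *	ttl         = BATADV_TTL
 *	ttvn        = destination's last known translation table version
 *	dest        = originator (mesh) address of the destination node
 */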

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}
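
/* Usage sketch (a minimal example, assuming the BATADV_P_DAT_DHT_GET
 * subtype from packet.h): a subsystem such as the distributed ARP table
 * tags its queries via the 4addr subtype:
 *
 *	if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, orig,
 *						   BATADV_P_DAT_DHT_GET))
 *		kfree_skb(skb);
 */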

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct
	 * route for this client. The destination will receive this packet
	 * and will try to reroute it because the ttvn contained in the
	 * header is less than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = NET_XMIT_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}
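
/* A short worked example of the ttvn rollback above: if the destination
 * currently announces translation table version 42 and the local node
 * knows the client is roaming, the packet leaves with ttvn 41; the
 * receiver spots the outdated ttvn, re-runs its translation table lookup
 * and reroutes the packet to the client's new location.
 */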

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the according destination node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, let's send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
				       packet_subtype, orig_node, vid);
}

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}
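
/* Usage sketch: a transmit path typically chooses between the two lookup
 * helpers above (the gateway test below is a stand-in for the real
 * policy decision, not code from this file):
 *
 *	if (forward_to_gateway)
 *		ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
 *	else
 *		ret = batadv_send_skb_via_tt_generic(bat_priv, skb,
 *						     BATADV_UNICAST, 0,
 *						     NULL, vid);
 */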

void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() (where the originator mac is set) and
	 * outdated packets (especially with uninitialized mac addresses)
	 * still sitting in the packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_free_ref(forw_packet->if_outgoing);
	kfree(forw_packet);
}

static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;
	forw_packet->if_outgoing = NULL;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
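
/* Timing sketch: a broadcast queued here with delay=0 is sent right away
 * by the worker below and then re-queued every 5 ms, so it leaves each
 * interface up to num_bcasts times (bounded by BATADV_NUM_BCASTS_MAX) at
 * roughly t = 0 ms, 5 ms, 10 ms, ...
 */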

static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake-up time unless we are shutting down.
	 *
	 * only re-schedule if this is the "original" copy, e.g. the OGM of the
	 * primary interface should only be rescheduled once per period, but
	 * this function will be called for the forw_packet instances of the
	 * other secondary interfaces as well.
	 */
	if (forw_packet->own &&
	    forw_packet->if_incoming == forw_packet->if_outgoing)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}