/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "log.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 * @dst_addr: the payload destination
 *
 * Send out an already prepared packet to the given destination address using
 * the specified interface. The address may either be the address of a
 * neighbor (unicast) or the broadcast address.
 *
 * Regardless of the return value, the skb is consumed.
 *
 * Return: A negative errno code is returned on a failure. A success does not
 * guarantee the frame will be transmitted as it may be dropped due
 * to congestion or traffic shaping.
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv;
	struct ethhdr *ethhdr;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

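/**
 * batadv_send_broadcast_skb - send a packet as broadcast on an interface
 * @skb: the packet to send
 * @hard_iface: the interface to send the packet on
 *
 * Wrapper around batadv_send_skb_packet() which uses the broadcast address
 * as destination; like it, the skb is consumed regardless of the return
 * value.
 *
 * Return: A negative errno code on failure or the NET_XMIT status otherwise.
 */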
int batadv_send_broadcast_skb(struct sk_buff *skb,
			      struct batadv_hard_iface *hard_iface)
{
	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}

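/**
 * batadv_send_unicast_skb - send a packet to a neighbor as unicast
 * @skb: the packet to send
 * @neigh: the neighbor to send the packet to
 *
 * Sends the given skb to the neighbor's address via the neighbor's incoming
 * interface and, for B.A.T.M.A.N. V, records the time of the last unicast
 * transmission. The skb is consumed regardless of the return value.
 *
 * Return: A negative errno code on failure or the NET_XMIT status otherwise.
 */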
int batadv_send_unicast_skb(struct sk_buff *skb,
			    struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	struct batadv_hardif_neigh_node *hardif_neigh;
#endif
	int ret;

	ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

	if (hardif_neigh && ret != NET_XMIT_DROP)
		hardif_neigh->bat_v.last_unicast_tx = jiffies;

	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);
#endif

	return ret;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of MAC header. If the packet originated from this
 * host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Return: negative errno code on a failure, -EINPROGRESS if the skb is
 * buffered for later transmit or the NET_XMIT status returned by the
 * lower routine if the packet has been passed down.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node) {
		ret = -EINVAL;
		goto free_skb;
	}

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
		/* skb was consumed */
		skb = NULL;

		goto put_neigh_node;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or if
	 * network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node))
		ret = -EINPROGRESS;
	else
		ret = batadv_send_unicast_skb(skb, neigh_node);

	/* skb was consumed */
	skb = NULL;

put_neigh_node:
	batadv_neigh_node_put(neigh_node);
free_skb:
	kfree_skb(skb);

	return ret;
}

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Push the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return ret;
}

308/**
Linus Lüssinge300d312013-07-03 10:40:00 +0200309 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
Martin Hundebøllf097e252013-05-23 16:53:01 +0200310 * @bat_priv: the bat priv with all the soft interface information
311 * @skb: payload to send
312 * @packet_type: the batman unicast packet type to use
313 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
314 * 4addr packets)
Linus Lüssinge300d312013-07-03 10:40:00 +0200315 * @orig_node: the originator to send the packet to
Antonio Quartullic018ad32013-06-04 12:11:39 +0200316 * @vid: the vid to be used to search the translation table
Martin Hundebøllf097e252013-05-23 16:53:01 +0200317 *
Linus Lüssinge300d312013-07-03 10:40:00 +0200318 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
319 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
Sven Eckelmannf19dc772016-06-27 08:15:42 +0200320 * as packet_type. Then send this frame to the given orig_node.
Linus Lüssinge300d312013-07-03 10:40:00 +0200321 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200322 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
Martin Hundebøllf097e252013-05-23 16:53:01 +0200323 */
Linus Lüssing1d8ab8d2014-02-15 17:47:52 +0100324int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
325 struct sk_buff *skb, int packet_type,
326 int packet_subtype,
327 struct batadv_orig_node *orig_node,
328 unsigned short vid)
Martin Hundebøllf097e252013-05-23 16:53:01 +0200329{
Martin Hundebøllf097e252013-05-23 16:53:01 +0200330 struct batadv_unicast_packet *unicast_packet;
Antonio Quartulli8ea64e22015-05-11 20:34:52 +0200331 struct ethhdr *ethhdr;
Sven Eckelmann1ad5bcb2016-07-17 21:04:03 +0200332 int ret = NET_XMIT_DROP;
Martin Hundebøllf097e252013-05-23 16:53:01 +0200333
Simon Wunderlich56a5ca82013-05-28 11:49:47 +0200334 if (!orig_node)
Martin Hundebøllf097e252013-05-23 16:53:01 +0200335 goto out;
336
337 switch (packet_type) {
338 case BATADV_UNICAST:
Antonio Quartulli33faa042013-10-19 14:06:05 +0200339 if (!batadv_send_skb_prepare_unicast(skb, orig_node))
340 goto out;
Martin Hundebøllf097e252013-05-23 16:53:01 +0200341 break;
342 case BATADV_UNICAST_4ADDR:
Antonio Quartulli33faa042013-10-19 14:06:05 +0200343 if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
344 orig_node,
345 packet_subtype))
346 goto out;
Martin Hundebøllf097e252013-05-23 16:53:01 +0200347 break;
348 default:
349 /* this function supports UNICAST and UNICAST_4ADDR only. It
350 * should never be invoked with any other packet type
351 */
352 goto out;
353 }
354
Linus Lüssing927c2ed2014-01-19 22:22:45 +0100355 /* skb->data might have been reallocated by
356 * batadv_send_skb_prepare_unicast{,_4addr}()
357 */
358 ethhdr = eth_hdr(skb);
Martin Hundebøllf097e252013-05-23 16:53:01 +0200359 unicast_packet = (struct batadv_unicast_packet *)skb->data;
360
361 /* inform the destination node that we are still missing a correct route
362 * for this client. The destination will receive this packet and will
363 * try to reroute it because the ttvn contained in the header is less
364 * than the current one
365 */
Antonio Quartullic018ad32013-06-04 12:11:39 +0200366 if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
Martin Hundebøllf097e252013-05-23 16:53:01 +0200367 unicast_packet->ttvn = unicast_packet->ttvn - 1;
368
Sven Eckelmann1ad5bcb2016-07-17 21:04:03 +0200369 ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
370 /* skb was consumed */
371 skb = NULL;
Martin Hundebøllf097e252013-05-23 16:53:01 +0200372
373out:
Sven Eckelmann1ad5bcb2016-07-17 21:04:03 +0200374 kfree_skb(skb);
Martin Hundebøllf097e252013-05-23 16:53:01 +0200375 return ret;
376}
377
/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the corresponding destination node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;
	int ret;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, let's send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
				      packet_subtype, orig_node, vid);

	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
				      BATADV_P_DATA, orig_node, vid);

	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}

/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: The packet to free
 * @dropped: whether the packet is freed because it is dropped
 *
 * This frees a forwarding packet and releases any resources it might
 * have claimed.
 */
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
			     bool dropped)
{
	if (dropped)
		kfree_skb(forw_packet->skb);
	else
		consume_skb(forw_packet->skb);

	if (forw_packet->if_incoming)
		batadv_hardif_put(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_put(forw_packet->if_outgoing);
	if (forw_packet->queue_left)
		atomic_inc(forw_packet->queue_left);
	kfree(forw_packet);
}

/**
 * batadv_forw_packet_alloc - allocate a forwarding packet
 * @if_incoming: The (optional) if_incoming to be grabbed
 * @if_outgoing: The (optional) if_outgoing to be grabbed
 * @queue_left: The (optional) queue counter to decrease
 * @bat_priv: The bat_priv for the mesh of this forw_packet
 *
 * Allocates a forwarding packet and tries to get a reference to the
 * (optional) if_incoming, if_outgoing and queue_left. If queue_left
 * is NULL then bat_priv is optional, too.
 *
 * Return: An allocated forwarding packet on success, NULL otherwise.
 */
struct batadv_forw_packet *
batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
			 struct batadv_hard_iface *if_outgoing,
			 atomic_t *queue_left,
			 struct batadv_priv *bat_priv)
{
	struct batadv_forw_packet *forw_packet;
	const char *qname;

	if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) {
		qname = "unknown";

		if (queue_left == &bat_priv->bcast_queue_left)
			qname = "bcast";

		if (queue_left == &bat_priv->batman_queue_left)
			qname = "batman";

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s queue is full\n", qname);

		return NULL;
	}

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto err;

	if (if_incoming)
		kref_get(&if_incoming->refcount);

	if (if_outgoing)
		kref_get(&if_outgoing->refcount);

	INIT_HLIST_NODE(&forw_packet->list);
	INIT_HLIST_NODE(&forw_packet->cleanup_list);
	forw_packet->skb = NULL;
	forw_packet->queue_left = queue_left;
	forw_packet->if_incoming = if_incoming;
	forw_packet->if_outgoing = if_outgoing;
	forw_packet->num_packets = 0;

	return forw_packet;

err:
	if (queue_left)
		atomic_inc(queue_left);

	return NULL;
}

/**
 * batadv_forw_packet_was_stolen - check whether someone stole this packet
 * @forw_packet: the forwarding packet to check
 *
 * This function checks whether the given forwarding packet was claimed by
 * someone else for free().
 *
 * Return: True if someone stole it, false otherwise.
 */
static bool
batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet)
{
	return !hlist_unhashed(&forw_packet->cleanup_list);
}

/**
 * batadv_forw_packet_steal - claim a forw_packet for free()
 * @forw_packet: the forwarding packet to steal
 * @lock: a key to the store to steal from (e.g. forw_{bat,bcast}_list_lock)
 *
 * This function tries to steal a specific forw_packet from global
 * visibility for the purpose of getting it for free(). That means
 * the caller is *not* allowed to requeue it afterwards.
 *
 * Return: True if stealing was successful. False if someone else stole it
 * before us.
 */
bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet,
			      spinlock_t *lock)
{
	/* did purging routine steal it earlier? */
	spin_lock_bh(lock);
	if (batadv_forw_packet_was_stolen(forw_packet)) {
		spin_unlock_bh(lock);
		return false;
	}

	hlist_del_init(&forw_packet->list);

	/* Just to spot misuse of this function */
	hlist_add_fake(&forw_packet->cleanup_list);

	spin_unlock_bh(lock);
	return true;
}

/**
 * batadv_forw_packet_list_steal - claim a list of forward packets for free()
 * @forw_list: the forward packets to be stolen
 * @cleanup_list: a backup pointer, to be able to dispose the packet later
 * @hard_iface: the interface to steal forward packets from
 *
 * This function claims responsibility to free any forw_packet queued on the
 * given hard_iface. If hard_iface is NULL forwarding packets on all hard
 * interfaces will be claimed.
 *
 * The packets are moved from the forw_list to the cleanup_list, which allows
 * already running threads to notice the claim.
 */
static void
batadv_forw_packet_list_steal(struct hlist_head *forw_list,
			      struct hlist_head *cleanup_list,
			      const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;

	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  forw_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if (hard_iface &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		hlist_del(&forw_packet->list);
		hlist_add_head(&forw_packet->cleanup_list, cleanup_list);
	}
}

/**
 * batadv_forw_packet_list_free - free a list of forward packets
 * @head: a list of to be freed forw_packets
 *
 * This function cancels the scheduling of any packet in the provided list,
 * waits for any possibly running packet forwarding thread to finish and
 * finally, safely frees these forward packets.
 *
 * This function might sleep.
 */
static void batadv_forw_packet_list_free(struct hlist_head *head)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;

	hlist_for_each_entry_safe(forw_packet, safe_tmp_node, head,
				  cleanup_list) {
		cancel_delayed_work_sync(&forw_packet->delayed_work);

		hlist_del(&forw_packet->cleanup_list);
		batadv_forw_packet_free(forw_packet, true);
	}
}

/**
 * batadv_forw_packet_queue - try to queue a forwarding packet
 * @forw_packet: the forwarding packet to queue
 * @lock: a key to the store (e.g. forw_{bat,bcast}_list_lock)
 * @head: the shelf to queue it on (e.g. forw_{bat,bcast}_list)
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue a forwarding packet. Requeuing
 * is prevented if the corresponding interface is shutting down
 * (e.g. if batadv_forw_packet_list_steal() was called for this
 * packet earlier).
 *
 * Calling batadv_forw_packet_queue() after a call to
 * batadv_forw_packet_steal() is forbidden!
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet,
				     spinlock_t *lock, struct hlist_head *head,
				     unsigned long send_time)
{
	spin_lock_bh(lock);

	/* did purging routine steal it from us? */
	if (batadv_forw_packet_was_stolen(forw_packet)) {
		/* If you got it for free() without trouble, then
		 * don't get back into the queue after stealing...
		 */
		WARN_ONCE(hlist_fake(&forw_packet->cleanup_list),
			  "Requeuing after batadv_forw_packet_steal() not allowed!\n");

		spin_unlock_bh(lock);
		return;
	}

	hlist_del_init(&forw_packet->list);
	hlist_add_head(&forw_packet->list, head);

	queue_delayed_work(batadv_event_workqueue,
			   &forw_packet->delayed_work,
			   send_time - jiffies);
	spin_unlock_bh(lock);
}

/**
 * batadv_forw_packet_bcast_queue - try to queue a broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue a broadcast packet.
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
static void
batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv,
			       struct batadv_forw_packet *forw_packet,
			       unsigned long send_time)
{
	batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bcast_list_lock,
				 &bat_priv->forw_bcast_list, send_time);
}

/**
 * batadv_forw_packet_ogmv1_queue - try to queue an OGMv1 packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue an OGMv1 packet.
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
				    struct batadv_forw_packet *forw_packet,
				    unsigned long send_time)
{
	batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bat_list_lock,
				 &bat_priv->forw_bat_list, send_time);
}

/**
 * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 * @own_packet: true if it is a self-generated broadcast packet
 *
 * Add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay,
				    bool own_packet)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto err;

	forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
					       &bat_priv->bcast_queue_left,
					       bat_priv);
	batadv_hardif_put(primary_if);
	if (!forw_packet)
		goto err;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto err_packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	forw_packet->skb = newskb;
	forw_packet->own = own_packet;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	batadv_forw_packet_bcast_queue(bat_priv, forw_packet, jiffies + delay);
	return NETDEV_TX_OK;

err_packet_free:
	batadv_forw_packet_free(forw_packet, true);
err:
	return NETDEV_TX_BUSY;
}

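/**
 * batadv_send_outstanding_bcast_packet - rebroadcast a queued packet
 * @work: the work item of the scheduled broadcast forwarding packet
 *
 * Sends one round of rebroadcasts of the queued broadcast packet on every
 * hard interface attached to the soft interface, skipping interfaces for
 * which batadv_hardif_no_broadcast() reports that a rebroadcast is
 * unnecessary. Requeues itself until forw_packet->num_packets reaches
 * BATADV_NUM_BCASTS_MAX; afterwards the forwarding packet is stolen from the
 * broadcast list and freed, unless the purging routine claimed it first.
 */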
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct batadv_hardif_neigh_node *neigh_node;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;
	unsigned long send_time = jiffies + msecs_to_jiffies(5);
	bool dropped = false;
	u8 *neigh_addr;
	u8 *orig_neigh;
	int ret = 0;

	delayed_work = to_delayed_work(work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
		dropped = true;
		goto out;
	}

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet)) {
		dropped = true;
		goto out;
	}

	bcast_packet = (struct batadv_bcast_packet *)forw_packet->skb->data;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		if (forw_packet->own) {
			neigh_node = NULL;
		} else {
			neigh_addr = eth_hdr(forw_packet->skb)->h_source;
			neigh_node = batadv_hardif_neigh_get(hard_iface,
							     neigh_addr);
		}

		orig_neigh = neigh_node ? neigh_node->orig : NULL;

		ret = batadv_hardif_no_broadcast(hard_iface, bcast_packet->orig,
						 orig_neigh);

		if (ret) {
			char *type;

			switch (ret) {
			case BATADV_HARDIF_BCAST_NORECIPIENT:
				type = "no neighbor";
				break;
			case BATADV_HARDIF_BCAST_DUPFWD:
				type = "single neighbor is source";
				break;
			case BATADV_HARDIF_BCAST_DUPORIG:
				type = "single neighbor is originator";
				break;
			default:
				type = "unknown";
			}

			batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "BCAST packet from orig %pM on %s suppressed: %s\n",
				   bcast_packet->orig,
				   hard_iface->net_dev->name, type);

			if (neigh_node)
				batadv_hardif_neigh_put(neigh_node);

			continue;
		}

		if (neigh_node)
			batadv_hardif_neigh_put(neigh_node);

		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_broadcast_skb(skb1, hard_iface);

		batadv_hardif_put(hard_iface);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		batadv_forw_packet_bcast_queue(bat_priv, forw_packet,
					       send_time);
		return;
	}

out:
	/* do we get something for free()? */
	if (batadv_forw_packet_steal(forw_packet,
				     &bat_priv->forw_bcast_list_lock))
		batadv_forw_packet_free(forw_packet, dropped);
}

/**
 * batadv_purge_outstanding_packets - stop/purge scheduled bcast/OGMv1 packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: the hard interface to cancel and purge bcast/ogm packets on
 *
 * This method cancels and purges any broadcast and OGMv1 packet on the given
 * hard_iface. If hard_iface is NULL, broadcast and OGMv1 packets on all hard
 * interfaces will be canceled and purged.
 *
 * This function might sleep.
 */
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct hlist_head head = HLIST_HEAD_INIT;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* claim bcast list for free() */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	batadv_forw_packet_list_steal(&bat_priv->forw_bcast_list, &head,
				      hard_iface);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* claim batman packet list for free() */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	batadv_forw_packet_list_steal(&bat_priv->forw_bat_list, &head,
				      hard_iface);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	/* then cancel or wait for packet workers to finish and free */
	batadv_forw_packet_list_free(&head);
}