/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "log.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to send the packet on
 * @dst_addr: the payload destination
 *
 * Send out an already prepared packet to the given destination address via
 * the specified interface. If dst_addr is the broadcast address, the packet
 * is broadcast on hard_iface, otherwise it is sent as unicast to the given
 * neighbor address.
 *
 * Regardless of the return value, the skb is consumed.
 *
 * Return: A negative errno code is returned on a failure. A success does not
 * guarantee the frame will be transmitted as it may be dropped due
 * to congestion or traffic shaping.
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv;
	struct ethhdr *ethhdr;
	int ret;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	ret = dev_queue_xmit(skb);
	return net_xmit_eval(ret);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

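/**
 * batadv_send_broadcast_skb - send an skb to the broadcast address
 * @skb: the packet to send
 * @hard_iface: the interface to broadcast the packet on
 *
 * Wrapper around batadv_send_skb_packet() which uses the broadcast address
 * as destination.
 *
 * Regardless of the return value, the skb is consumed.
 *
 * Return: A negative errno code is returned on a failure, see
 * batadv_send_skb_packet().
 */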
int batadv_send_broadcast_skb(struct sk_buff *skb,
			      struct batadv_hard_iface *hard_iface)
{
	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}

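/**
 * batadv_send_unicast_skb - send an skb to the given neighbor
 * @skb: the packet to send
 * @neigh: the neighbor to send the packet to
 *
 * Wrapper around batadv_send_skb_packet() which transmits the skb on the
 * neighbor's incoming interface towards the neighbor's address.
 *
 * Regardless of the return value, the skb is consumed.
 *
 * Return: A negative errno code is returned on a failure, see
 * batadv_send_skb_packet().
 */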
int batadv_send_unicast_skb(struct sk_buff *skb,
			    struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	struct batadv_hardif_neigh_node *hardif_neigh;
#endif
	int ret;

	ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

	if (hardif_neigh && ret != NET_XMIT_DROP)
		hardif_neigh->bat_v.last_unicast_tx = jiffies;

	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);
#endif

	return ret;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of MAC header. If the packet originated from this
 * host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Return: negative errno code on a failure, -EINPROGRESS if the skb is
 * buffered for later transmit or the NET_XMIT status returned by the
 * lower routine if the packet has been passed down.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node) {
		ret = -EINVAL;
		goto free_skb;
	}

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
		/* skb was consumed */
		skb = NULL;

		goto put_neigh_node;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or if
	 * network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node))
		ret = -EINPROGRESS;
	else
		ret = batadv_send_unicast_skb(skb, neigh_node);

	/* skb was consumed */
	skb = NULL;

put_neigh_node:
	batadv_neigh_node_put(neigh_node);
free_skb:
	kfree_skb(skb);

	return ret;
}

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return ret;
}

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
	/* skb was consumed */
	skb = NULL;

out:
	kfree_skb(skb);
	return ret;
}

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the looked-up destination node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;
	int ret;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
				      packet_subtype, orig_node, vid);

	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast-4addr header and send this frame to this gateway node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
				      BATADV_P_DATA, orig_node, vid);

	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}

/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: The packet to free
 * @dropped: whether the packet is freed because it is dropped
 *
 * This frees a forwarding packet and releases any resources it might
 * have claimed.
 */
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
			     bool dropped)
{
	if (dropped)
		kfree_skb(forw_packet->skb);
	else
		consume_skb(forw_packet->skb);

	if (forw_packet->if_incoming)
		batadv_hardif_put(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_put(forw_packet->if_outgoing);
	if (forw_packet->queue_left)
		atomic_inc(forw_packet->queue_left);
	kfree(forw_packet);
}

/**
 * batadv_forw_packet_alloc - allocate a forwarding packet
 * @if_incoming: The (optional) if_incoming to be grabbed
 * @if_outgoing: The (optional) if_outgoing to be grabbed
 * @queue_left: The (optional) queue counter to decrease
 * @bat_priv: The bat_priv for the mesh of this forw_packet
 * @skb: The raw packet this forwarding packet shall contain
 *
 * Allocates a forwarding packet, grabs references on the (optional)
 * if_incoming and if_outgoing and decreases the (optional) queue_left
 * counter. If queue_left is NULL then bat_priv is optional, too.
 *
 * Return: An allocated forwarding packet on success, NULL otherwise.
 */
struct batadv_forw_packet *
batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
			 struct batadv_hard_iface *if_outgoing,
			 atomic_t *queue_left,
			 struct batadv_priv *bat_priv,
			 struct sk_buff *skb)
{
	struct batadv_forw_packet *forw_packet;
	const char *qname;

	if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) {
		qname = "unknown";

		if (queue_left == &bat_priv->bcast_queue_left)
			qname = "bcast";

		if (queue_left == &bat_priv->batman_queue_left)
			qname = "batman";

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s queue is full\n", qname);

		return NULL;
	}

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto err;

	if (if_incoming)
		kref_get(&if_incoming->refcount);

	if (if_outgoing)
		kref_get(&if_outgoing->refcount);

	INIT_HLIST_NODE(&forw_packet->list);
	INIT_HLIST_NODE(&forw_packet->cleanup_list);
	forw_packet->skb = skb;
	forw_packet->queue_left = queue_left;
	forw_packet->if_incoming = if_incoming;
	forw_packet->if_outgoing = if_outgoing;
	forw_packet->num_packets = 0;

	return forw_packet;

err:
	if (queue_left)
		atomic_inc(queue_left);

	return NULL;
}

/**
 * batadv_forw_packet_was_stolen - check whether someone stole this packet
 * @forw_packet: the forwarding packet to check
 *
 * This function checks whether the given forwarding packet was claimed by
 * someone else for free().
 *
 * Return: True if someone stole it, false otherwise.
 */
static bool
batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet)
{
	return !hlist_unhashed(&forw_packet->cleanup_list);
}

/**
 * batadv_forw_packet_steal - claim a forw_packet for free()
 * @forw_packet: the forwarding packet to steal
 * @lock: a key to the store to steal from (e.g. forw_{bat,bcast}_list_lock)
 *
 * This function tries to steal a specific forw_packet from global
 * visibility for the purpose of getting it for free(). That means
 * the caller is *not* allowed to requeue it afterwards.
 *
 * Return: True if stealing was successful. False if someone else stole it
 * before us.
 */
bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet,
			      spinlock_t *lock)
{
	/* did purging routine steal it earlier? */
	spin_lock_bh(lock);
	if (batadv_forw_packet_was_stolen(forw_packet)) {
		spin_unlock_bh(lock);
		return false;
	}

	hlist_del_init(&forw_packet->list);

	/* Just to spot misuse of this function */
	hlist_add_fake(&forw_packet->cleanup_list);

	spin_unlock_bh(lock);
	return true;
}

/**
 * batadv_forw_packet_list_steal - claim a list of forward packets for free()
 * @forw_list: the list of forward packets to steal from
 * @cleanup_list: the list to move the claimed packets to, for later disposal
 * @hard_iface: the interface to steal forward packets from
 *
 * This function claims responsibility to free any forw_packet queued on the
 * given hard_iface. If hard_iface is NULL, forwarding packets on all hard
 * interfaces will be claimed.
 *
 * The packets are moved from the forw_list to the cleanup_list, which allows
 * already running threads to notice the claiming.
 */
static void
batadv_forw_packet_list_steal(struct hlist_head *forw_list,
			      struct hlist_head *cleanup_list,
			      const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;

	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  forw_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if (hard_iface &&
		    forw_packet->if_incoming != hard_iface &&
		    forw_packet->if_outgoing != hard_iface)
			continue;

		hlist_del(&forw_packet->list);
		hlist_add_head(&forw_packet->cleanup_list, cleanup_list);
	}
}

/**
 * batadv_forw_packet_list_free - free a list of forward packets
 * @head: the list of forw_packets to free
 *
 * This function cancels the scheduling of any packet in the provided list,
 * waits for any possibly running packet forwarding thread to finish and
 * finally, safely frees these forward packets.
 *
 * This function might sleep.
 */
static void batadv_forw_packet_list_free(struct hlist_head *head)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;

	hlist_for_each_entry_safe(forw_packet, safe_tmp_node, head,
				  cleanup_list) {
		cancel_delayed_work_sync(&forw_packet->delayed_work);

		hlist_del(&forw_packet->cleanup_list);
		batadv_forw_packet_free(forw_packet, true);
	}
}

/**
 * batadv_forw_packet_queue - try to queue a forwarding packet
 * @forw_packet: the forwarding packet to queue
 * @lock: a key to the store (e.g. forw_{bat,bcast}_list_lock)
 * @head: the list to queue it on (e.g. forw_{bat,bcast}_list)
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue a forwarding packet. Requeuing
 * is prevented if the corresponding interface is shutting down
 * (e.g. if batadv_forw_packet_list_steal() was called for this
 * packet earlier).
 *
 * Calling batadv_forw_packet_queue() after a call to
 * batadv_forw_packet_steal() is forbidden!
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet,
				     spinlock_t *lock, struct hlist_head *head,
				     unsigned long send_time)
{
	spin_lock_bh(lock);

	/* did purging routine steal it from us? */
	if (batadv_forw_packet_was_stolen(forw_packet)) {
		/* If you got it for free() without trouble, then
		 * don't get back into the queue after stealing...
		 */
		WARN_ONCE(hlist_fake(&forw_packet->cleanup_list),
			  "Requeuing after batadv_forw_packet_steal() not allowed!\n");

		spin_unlock_bh(lock);
		return;
	}

	hlist_del_init(&forw_packet->list);
	hlist_add_head(&forw_packet->list, head);

	queue_delayed_work(batadv_event_workqueue,
			   &forw_packet->delayed_work,
			   send_time - jiffies);
	spin_unlock_bh(lock);
}

/**
 * batadv_forw_packet_bcast_queue - try to queue a broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue a broadcast packet.
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
static void
batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv,
			       struct batadv_forw_packet *forw_packet,
			       unsigned long send_time)
{
	batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bcast_list_lock,
				 &bat_priv->forw_bcast_list, send_time);
}

/**
 * batadv_forw_packet_ogmv1_queue - try to queue an OGMv1 packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue an OGMv1 packet.
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
				    struct batadv_forw_packet *forw_packet,
				    unsigned long send_time)
{
	batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bat_list_lock,
				 &bat_priv->forw_bat_list, send_time);
}

/**
 * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 * @own_packet: true if it is a self-generated broadcast packet
 *
 * Add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay,
				    bool own_packet)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto err;

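	/* the queue works on its own copy, so the caller's skb stays
	 * untouched and must still be freed by the caller
	 */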
	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb) {
		batadv_hardif_put(primary_if);
		goto err;
	}

	forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
					       &bat_priv->bcast_queue_left,
					       bat_priv, newskb);
	batadv_hardif_put(primary_if);
	if (!forw_packet)
		goto err_packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	forw_packet->own = own_packet;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	batadv_forw_packet_bcast_queue(bat_priv, forw_packet, jiffies + delay);
	return NETDEV_TX_OK;

err_packet_free:
	kfree_skb(newskb);
err:
	return NETDEV_TX_BUSY;
}

/**
 * batadv_forw_packet_bcasts_left - check if a retransmission is necessary
 * @forw_packet: the forwarding packet to check
 * @hard_iface: the interface to check on
 *
 * Checks whether a given packet has any (re)transmissions left on the provided
 * interface.
 *
 * hard_iface may be NULL: in that case the number of transmissions this skb
 * had so far is compared with the maximum number of retransmissions,
 * independent of any interface.
 *
 * Return: True if (re)transmissions are left, false otherwise.
 */
static bool
batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet,
			       struct batadv_hard_iface *hard_iface)
{
	unsigned int max;

	if (hard_iface)
		max = hard_iface->num_bcasts;
	else
		max = BATADV_NUM_BCASTS_MAX;

	return BATADV_SKB_CB(forw_packet->skb)->num_bcasts < max;
}

/**
 * batadv_forw_packet_bcasts_inc - increment retransmission counter of a packet
 * @forw_packet: the packet to increase the counter for
 */
static void
batadv_forw_packet_bcasts_inc(struct batadv_forw_packet *forw_packet)
{
	BATADV_SKB_CB(forw_packet->skb)->num_bcasts++;
}

/**
 * batadv_forw_packet_is_rebroadcast - check packet for previous transmissions
 * @forw_packet: the packet to check
 *
 * Return: True if this packet was transmitted before, false otherwise.
 */
bool batadv_forw_packet_is_rebroadcast(struct batadv_forw_packet *forw_packet)
{
	return BATADV_SKB_CB(forw_packet->skb)->num_bcasts > 0;
}

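/**
 * batadv_send_outstanding_bcast_packet - broadcast a queued packet
 * @work: work queue item of the queued forw_packet
 *
 * Worker for a queued broadcast forw_packet: rebroadcast the packet on all
 * active interfaces of its soft interface (skipping interfaces on which the
 * rebroadcast can be suppressed) and requeue the forw_packet as long as
 * retransmissions are left.
 */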
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct batadv_hardif_neigh_node *neigh_node;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;
	unsigned long send_time = jiffies + msecs_to_jiffies(5);
	bool dropped = false;
	u8 *neigh_addr;
	u8 *orig_neigh;
	int ret = 0;

	delayed_work = to_delayed_work(work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
		dropped = true;
		goto out;
	}

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet)) {
		dropped = true;
		goto out;
	}

	bcast_packet = (struct batadv_bcast_packet *)forw_packet->skb->data;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (!batadv_forw_packet_bcasts_left(forw_packet, hard_iface))
			continue;

		if (forw_packet->own) {
			neigh_node = NULL;
		} else {
			neigh_addr = eth_hdr(forw_packet->skb)->h_source;
			neigh_node = batadv_hardif_neigh_get(hard_iface,
							     neigh_addr);
		}

		orig_neigh = neigh_node ? neigh_node->orig : NULL;

		ret = batadv_hardif_no_broadcast(hard_iface, bcast_packet->orig,
						 orig_neigh);

		if (ret) {
			char *type;

			switch (ret) {
			case BATADV_HARDIF_BCAST_NORECIPIENT:
				type = "no neighbor";
				break;
			case BATADV_HARDIF_BCAST_DUPFWD:
				type = "single neighbor is source";
				break;
			case BATADV_HARDIF_BCAST_DUPORIG:
				type = "single neighbor is originator";
				break;
			default:
				type = "unknown";
			}

			batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "BCAST packet from orig %pM on %s suppressed: %s\n",
				   bcast_packet->orig,
				   hard_iface->net_dev->name, type);

			if (neigh_node)
				batadv_hardif_neigh_put(neigh_node);

			continue;
		}

		if (neigh_node)
			batadv_hardif_neigh_put(neigh_node);

		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_broadcast_skb(skb1, hard_iface);

		batadv_hardif_put(hard_iface);
	}
	rcu_read_unlock();

	batadv_forw_packet_bcasts_inc(forw_packet);

	/* if we still have some more bcasts to send */
	if (batadv_forw_packet_bcasts_left(forw_packet, NULL)) {
		batadv_forw_packet_bcast_queue(bat_priv, forw_packet,
					       send_time);
		return;
	}

out:
	/* do we get something for free()? */
	if (batadv_forw_packet_steal(forw_packet,
				     &bat_priv->forw_bcast_list_lock))
		batadv_forw_packet_free(forw_packet, dropped);
}

/**
 * batadv_purge_outstanding_packets - stop/purge scheduled bcast/OGMv1 packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: the hard interface to cancel and purge bcast/ogm packets on
 *
 * This method cancels and purges any broadcast and OGMv1 packet on the given
 * hard_iface. If hard_iface is NULL, broadcast and OGMv1 packets on all hard
 * interfaces will be canceled and purged.
 *
 * This function might sleep.
 */
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct hlist_head head = HLIST_HEAD_INIT;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s(): %s\n",
			   __func__, hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s()\n", __func__);

	/* claim bcast list for free() */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	batadv_forw_packet_list_steal(&bat_priv->forw_bcast_list, &head,
				      hard_iface);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* claim batman packet list for free() */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	batadv_forw_packet_list_steal(&bat_priv->forw_bat_list, &head,
				      hard_iface);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	/* then cancel or wait for packet workers to finish and free */
	batadv_forw_packet_list_free(&head);
}