/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/if.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 * @dst_addr: the payload destination
 *
 * Send out an already prepared packet to the given destination address via
 * the specified hard interface. The destination can be a neighbor's unicast
 * address or the broadcast address.
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
 * otherwise
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv;
	struct ethhdr *ethhdr;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

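/**
 * batadv_send_broadcast_skb - send an skb to the broadcast address
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
 * otherwise
 */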
int batadv_send_broadcast_skb(struct sk_buff *skb,
			      struct batadv_hard_iface *hard_iface)
{
	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}

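/**
 * batadv_send_unicast_skb - send an skb to a neighbor as unicast
 * @skb: the packet to send
 * @neigh: the neighbor to send the packet to
 *
 * If B.A.T.M.A.N. V is compiled in, the time of this unicast transmission is
 * also recorded for the neighbor.
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
 * otherwise
 */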
int batadv_send_unicast_skb(struct sk_buff *skb,
			    struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	struct batadv_hardif_neigh_node *hardif_neigh;
#endif
	int ret;

	ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

	if (hardif_neigh && ret != NET_XMIT_DROP)
		hardif_neigh->bat_v.last_unicast_tx = jiffies;

	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);
#endif

	return ret;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Return: NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * -EINPROGRESS if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
			ret = NET_XMIT_SUCCESS;

		goto out;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or if
	 * network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = -EINPROGRESS;
	} else {
		batadv_send_unicast_skb(skb, neigh_node);
		ret = NET_XMIT_SUCCESS;
	}

out:
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);

	return ret;
}

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return ret;
}

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = NET_XMIT_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_put(orig_node);
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the corresponding destination node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
				       packet_subtype, orig_node, vid);
}

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}

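/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: the packet to free
 *
 * Free the skb attached to the packet, release the references held on the
 * incoming and outgoing hard interfaces and free the structure itself.
 */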
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_put(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_put(forw_packet->if_outgoing);
	kfree(forw_packet);
}

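/**
 * _batadv_add_bcast_packet_to_list - queue broadcast packet and start timer
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the broadcast packet to queue
 * @send_time: number of jiffies to wait before running the delayed work
 */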
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/**
 * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 *
 * Add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;
	forw_packet->if_outgoing = NULL;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return NETDEV_TX_BUSY;
}

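/**
 * batadv_send_outstanding_bcast_packet - rebroadcast a queued packet
 * @work: work queue item
 *
 * Send a copy of the queued broadcast packet on every active interface that
 * still has rebroadcasts left, then requeue the packet until it has been sent
 * BATADV_NUM_BCASTS_MAX times. Stops early when the mesh is deactivating or
 * when DAT decides the broadcast can be dropped.
 */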
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = to_delayed_work(work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_broadcast_skb(skb1, hard_iface);

		batadv_hardif_put(hard_iface);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

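/**
 * batadv_purge_outstanding_packets - stop and purge scheduled packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: restrict purging to this interface, or NULL for all interfaces
 *
 * Cancel the delayed work of every queued broadcast and batman packet and
 * free the entries whose work was still pending. If hard_iface is given, only
 * packets with a matching incoming or outgoing interface are purged.
 */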
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if (hard_iface &&
		    forw_packet->if_incoming != hard_iface &&
		    forw_packet->if_outgoing != hard_iface)
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			if (!forw_packet->own)
				atomic_inc(&bat_priv->bcast_queue_left);

			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if (hard_iface &&
		    forw_packet->if_incoming != hard_iface &&
		    forw_packet->if_outgoing != hard_iface)
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			if (!forw_packet->own)
				atomic_inc(&bat_priv->batman_queue_left);

			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}