/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "log.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 * @dst_addr: the payload destination
 *
 * Send out an already prepared packet to the given destination address on
 * the specified interface. The destination may be a neighbor's unicast
 * address or the broadcast address.
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
 * otherwise
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv;
	struct ethhdr *ethhdr;
	int ret;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 *
	 * A negative value cannot be returned here, because it could be
	 * interpreted as an unconsumed skb by callers of
	 * batadv_send_skb_to_orig().
	 */
	ret = dev_queue_xmit(skb);
	if (ret < 0)
		ret = NET_XMIT_DROP;

	return ret;
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

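/**
 * batadv_send_broadcast_skb - send a packet to the broadcast address
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 *
 * Wrapper around batadv_send_skb_packet() which sends the given skb to the
 * broadcast address via the given interface.
 *
 * Return: the result of batadv_send_skb_packet()
 */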
int batadv_send_broadcast_skb(struct sk_buff *skb,
			      struct batadv_hard_iface *hard_iface)
{
	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}

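/**
 * batadv_send_unicast_skb - send a packet to a neighbor as unicast
 * @skb: the packet to send
 * @neigh: the neighbor to send the packet to
 *
 * Wrapper around batadv_send_skb_packet() which sends the given skb to the
 * neighbor's address via the interface the neighbor was detected on. If the
 * B.A.T.M.A.N. V compile option is enabled, the neighbor's last unicast
 * transmit time is also updated unless the packet was dropped.
 *
 * Return: the result of batadv_send_skb_packet()
 */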
int batadv_send_unicast_skb(struct sk_buff *skb,
			    struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	struct batadv_hardif_neigh_node *hardif_neigh;
#endif
	int ret;

	ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

	if ((hardif_neigh) && (ret != NET_XMIT_DROP))
		hardif_neigh->bat_v.last_unicast_tx = jiffies;

	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);
#endif

	return ret;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Return: -1 on failure (and the skb is not consumed), -EINPROGRESS if the
 * skb is buffered for later transmit or the NET_XMIT status returned by the
 * lower routine if the packet has been passed down.
 *
 * If the return value is not -1, the skb has been consumed.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = -1;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		ret = batadv_frag_send_packet(skb, orig_node, neigh_node);

		goto out;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or if
	 * network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node))
		ret = -EINPROGRESS;
	else
		ret = batadv_send_unicast_skb(skb, neigh_node);

out:
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);

	return ret;
}

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return ret;
}

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr;
	int res, ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	res = batadv_send_skb_to_orig(skb, orig_node, NULL);
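	/* anything but -1 means batadv_send_skb_to_orig() consumed the skb,
	 * so it must not be freed in the error path below
	 */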
	if (res != -1)
		ret = NET_XMIT_SUCCESS;

out:
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the according destination node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;
	int ret;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
				      packet_subtype, orig_node, vid);

	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
				      BATADV_P_DATA, orig_node, vid);

	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}

/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: The packet to free
 *
 * This frees a forwarding packet and releases any resources it might
 * have claimed.
 */
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_put(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_put(forw_packet->if_outgoing);
	if (forw_packet->queue_left)
		atomic_inc(forw_packet->queue_left);
	kfree(forw_packet);
}

/**
 * batadv_forw_packet_alloc - allocate a forwarding packet
 * @if_incoming: The (optional) if_incoming to be grabbed
 * @if_outgoing: The (optional) if_outgoing to be grabbed
 * @queue_left: The (optional) queue counter to decrease
 * @bat_priv: The bat_priv for the mesh of this forw_packet
 *
 * Allocates a forwarding packet and tries to get a reference to the
 * (optional) if_incoming, if_outgoing and queue_left. If queue_left
 * is NULL then bat_priv is optional, too.
 *
 * Return: An allocated forwarding packet on success, NULL otherwise.
 */
struct batadv_forw_packet *
batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
			 struct batadv_hard_iface *if_outgoing,
			 atomic_t *queue_left,
			 struct batadv_priv *bat_priv)
{
	struct batadv_forw_packet *forw_packet;
	const char *qname;

	if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) {
		qname = "unknown";

		if (queue_left == &bat_priv->bcast_queue_left)
			qname = "bcast";

		if (queue_left == &bat_priv->batman_queue_left)
			qname = "batman";

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s queue is full\n", qname);

		return NULL;
	}

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto err;

	if (if_incoming)
		kref_get(&if_incoming->refcount);

	if (if_outgoing)
		kref_get(&if_outgoing->refcount);

	forw_packet->skb = NULL;
	forw_packet->queue_left = queue_left;
	forw_packet->if_incoming = if_incoming;
	forw_packet->if_outgoing = if_outgoing;
	forw_packet->num_packets = 0;

	return forw_packet;

err:
	if (queue_left)
		atomic_inc(queue_left);

	return NULL;
}

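/**
 * _batadv_add_bcast_packet_to_list - queue an already prepared forward packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: time (in jiffies) to wait before sending the packet
 *
 * Insert the forwarding packet into the broadcast queue and schedule its
 * delayed work on the batadv event workqueue.
 */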
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/**
 * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 *
 * Add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto err;

	forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
					       &bat_priv->bcast_queue_left,
					       bat_priv);
	batadv_hardif_put(primary_if);
	if (!forw_packet)
		goto err;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto err_packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

err_packet_free:
	batadv_forw_packet_free(forw_packet);
err:
	return NETDEV_TX_BUSY;
}

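/**
 * batadv_send_outstanding_bcast_packet - rebroadcast a queued packet
 * @work: work queue item
 *
 * Send one copy of the queued broadcast packet on each hard interface
 * belonging to the mesh interface, then requeue the packet until the
 * maximum number of rebroadcasts has been reached.
 */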
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = to_delayed_work(work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

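		/* an interface whose refcount already dropped to zero is
		 * being freed - skip it instead of rebroadcasting on it
		 */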
		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_broadcast_skb(skb1, hard_iface);

		batadv_hardif_put(hard_iface);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
}

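/**
 * batadv_purge_outstanding_packets - stop and purge scheduled packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: restrict the purge to packets using this hard interface, or
 *  NULL to purge the queued packets of all interfaces
 *
 * Cancel the delayed work of the queued broadcast and batman (OGM) packets
 * and free those whose transmission was still pending.
 */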
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}