/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "log.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 * @dst_addr: the payload destination
 *
 * Send out an already prepared packet to the given destination address via
 * the specified interface. The destination can either be a neighbor's
 * unicast address or the broadcast address.
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
 * otherwise
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv;
	struct ethhdr *ethhdr;
	int ret;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 *
	 * A negative value cannot be returned because it could be interpreted
	 * as the skb not having been consumed by callers of
	 * batadv_send_skb_to_orig().
	 */
	ret = dev_queue_xmit(skb);
	if (ret < 0)
		ret = NET_XMIT_DROP;

	return ret;
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/**
 * batadv_send_broadcast_skb - send a packet to the broadcast address
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
 * otherwise
 */
int batadv_send_broadcast_skb(struct sk_buff *skb,
			      struct batadv_hard_iface *hard_iface)
{
	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}

/**
 * batadv_send_unicast_skb - send a packet to a neighbor as unicast
 * @skb: the packet to send
 * @neigh: the neighbor to send the packet to
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
 * otherwise
 */
int batadv_send_unicast_skb(struct sk_buff *skb,
			    struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	struct batadv_hardif_neigh_node *hardif_neigh;
#endif
	int ret;

	ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

	if (hardif_neigh && ret != NET_XMIT_DROP)
		hardif_neigh->bat_v.last_unicast_tx = jiffies;

	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);
#endif

	return ret;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Return: -1 on failure (and the skb is not consumed), -EINPROGRESS if the
 * skb is buffered for later transmit or the NET_XMIT status returned by the
 * lower routine if the packet has been passed down.
 *
 * If the return value is not -1 the skb has been consumed.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = -1;

	/* batadv_find_router() increases neigh_nodes refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		ret = batadv_frag_send_packet(skb, orig_node, neigh_node);

		goto out;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or if
	 * network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node))
		ret = -EINPROGRESS;
	else
		ret = batadv_send_unicast_skb(skb, neigh_node);

out:
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);

	return ret;
}
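
/* Usage sketch (illustrative only, not taken from the kernel sources): how a
 * caller has to treat the return value of batadv_send_skb_to_orig(). Only -1
 * means the skb was not consumed and still belongs to the caller; every
 * other value (including -EINPROGRESS) means ownership has been handed over,
 * so the skb must not be touched again.
 *
 *	ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
 *	if (ret == -1)
 *		kfree_skb(skb);
 */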

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return ret;
}

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr;
	int res, ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	res = batadv_send_skb_to_orig(skb, orig_node, NULL);
	if (res != -1)
		ret = NET_XMIT_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_put(orig_node);
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the resolved destination node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, let's send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
				       packet_subtype, orig_node, vid);
}

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
				       BATADV_P_DATA, orig_node, vid);
}
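
/* Usage sketch (illustrative only, not taken from the kernel sources): a
 * simplified transmit decision combining the two helpers above. The
 * "via_gateway" condition is a stand-in; the real decision is made in
 * soft-interface.c and also considers gateway mode, DHCP snooping and
 * multicast handling.
 *
 *	if (via_gateway)
 *		ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
 *	else
 *		ret = batadv_send_skb_via_tt_generic(bat_priv, skb,
 *						     BATADV_UNICAST, 0,
 *						     NULL, vid);
 */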

/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: The packet to free
 *
 * This frees a forwarding packet and releases any resources it might
 * have claimed.
 */
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_put(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_put(forw_packet->if_outgoing);
	if (forw_packet->queue_left)
		atomic_inc(forw_packet->queue_left);
	kfree(forw_packet);
}

/**
 * batadv_forw_packet_alloc - allocate a forwarding packet
 * @if_incoming: The (optional) if_incoming to be grabbed
 * @if_outgoing: The (optional) if_outgoing to be grabbed
 * @queue_left: The (optional) queue counter to decrease
 * @bat_priv: The bat_priv for the mesh of this forw_packet
 *
 * Allocates a forwarding packet and tries to get a reference to the
 * (optional) if_incoming, if_outgoing and queue_left. If queue_left
 * is NULL then bat_priv is optional, too.
 *
 * Return: An allocated forwarding packet on success, NULL otherwise.
 */
struct batadv_forw_packet *
batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
			 struct batadv_hard_iface *if_outgoing,
			 atomic_t *queue_left,
			 struct batadv_priv *bat_priv)
{
	struct batadv_forw_packet *forw_packet;
	const char *qname;

	if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) {
		qname = "unknown";

		if (queue_left == &bat_priv->bcast_queue_left)
			qname = "bcast";

		if (queue_left == &bat_priv->batman_queue_left)
			qname = "batman";

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s queue is full\n", qname);

		return NULL;
	}

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto err;

	if (if_incoming)
		kref_get(&if_incoming->refcount);

	if (if_outgoing)
		kref_get(&if_outgoing->refcount);

	forw_packet->skb = NULL;
	forw_packet->queue_left = queue_left;
	forw_packet->if_incoming = if_incoming;
	forw_packet->if_outgoing = if_outgoing;
	forw_packet->num_packets = 0;

	return forw_packet;

err:
	if (queue_left)
		atomic_inc(queue_left);

	return NULL;
}
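
/* Usage sketch (illustrative only, not taken from the kernel sources):
 * batadv_forw_packet_alloc() decrements the optional queue counter and grabs
 * the interface references, and batadv_forw_packet_free() hands all of them
 * back, so a caller (compare batadv_add_bcast_packet_to_list() below) only
 * needs the single free call on its error path.
 *
 *	forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
 *					       &bat_priv->bcast_queue_left,
 *					       bat_priv);
 *	if (!forw_packet)
 *		return NETDEV_TX_BUSY;
 *	forw_packet->skb = my_skb;
 *
 *	batadv_forw_packet_free(forw_packet);
 */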

static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/**
 * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 *
 * Add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto err;

	forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
					       &bat_priv->bcast_queue_left,
					       bat_priv);
	batadv_hardif_put(primary_if);
	if (!forw_packet)
		goto err;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto err_packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

err_packet_free:
	batadv_forw_packet_free(forw_packet);
err:
	return NETDEV_TX_BUSY;
}
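
/* Usage sketch (illustrative only, not taken from the kernel sources): since
 * batadv_add_bcast_packet_to_list() queues a copy, a caller keeps ownership
 * of its own skb and frees it itself, regardless of the outcome.
 *
 *	ret = batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
 *	consume_skb(skb);
 */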

static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = to_delayed_work(work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_broadcast_skb(skb1, hard_iface);

		batadv_hardif_put(hard_iface);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
}

void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}