/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/if.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 * @dst_addr: the payload destination
 *
 * Send out an already prepared packet to the given destination address
 * using the specified interface. hard_iface must not be NULL: if dst_addr
 * is the broadcast address, the packet is broadcast via hard_iface,
 * otherwise it is sent as unicast to the given destination.
 *
 * Return: NET_XMIT_DROP in case of error or the result of
 * dev_queue_xmit(skb) otherwise.
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv;
	struct ethhdr *ethhdr;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

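/**
 * batadv_send_broadcast_skb - send a packet to the broadcast address
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 *
 * Return: NET_XMIT_DROP in case of error or the result of
 * dev_queue_xmit(skb) otherwise.
 */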
int batadv_send_broadcast_skb(struct sk_buff *skb,
			      struct batadv_hard_iface *hard_iface)
{
	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}

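/**
 * batadv_send_unicast_skb - send a packet to a neighbor over its interface
 * @skb: the packet to send
 * @neigh: the neighbor to send the packet to
 *
 * Return: NET_XMIT_DROP in case of error or the result of
 * dev_queue_xmit(skb) otherwise.
 */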
int batadv_send_unicast_skb(struct sk_buff *skb,
			    struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	struct batadv_hardif_neigh_node *hardif_neigh;
#endif
	int ret;

	ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

	if (hardif_neigh && ret != NET_XMIT_DROP)
		hardif_neigh->bat_v.last_unicast_tx = jiffies;

	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);
#endif

	return ret;
}

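/* Usage sketch (illustrative only, not part of the original file): both
 * wrappers consume the skb - it is either handed to dev_queue_xmit() or
 * freed on the error path - so a caller only inspects the verdict:
 *
 *	ret = batadv_send_unicast_skb(skb, neigh_node);
 *	if (ret == NET_XMIT_DROP)
 *		goto out;
 */
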
/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Return: NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
			ret = NET_XMIT_SUCCESS;

		goto out;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or
	 * if network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_unicast_skb(skb, neigh_node);
		ret = NET_XMIT_SUCCESS;
	}

out:
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);

	return ret;
}

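/* Caller sketch (illustrative only, not part of the original file):
 * forwarding code typically maps the verdict back onto its own return
 * value, along the lines of:
 *
 *	if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP)
 *		ret = NET_XMIT_SUCCESS;
 *
 * NET_XMIT_POLICED means the network coding layer has buffered the skb for
 * a later (possibly coded) transmission, so the skb must not be touched
 * again in that case either.
 */
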
/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: number of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

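/* For reference (a sketch only; the authoritative definition lives in
 * packet.h), the unicast header filled in above is laid out as follows:
 *
 *	struct batadv_unicast_packet {
 *		u8 packet_type;		BATADV_UNICAST or BATADV_UNICAST_4ADDR
 *		u8 version;		BATADV_COMPAT_VERSION
 *		u8 ttl;
 *		u8 ttvn;		destination translation table version
 *		u8 dest[ETH_ALEN];
 *	};
 *
 * struct batadv_unicast_4addr_packet embeds this struct as its first
 * member, which is what batadv_send_skb_prepare_unicast_4addr() below
 * relies on.
 */
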
/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return ret;
}

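/* Usage sketch (illustrative only, not part of the original file): callers
 * pick a subtype from the batadv_subtype enum in packet.h; the distributed
 * ARP table code, for instance, wraps its DHT queries roughly like this:
 *
 *	if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, orig_node,
 *						   BATADV_P_DAT_DHT_GET))
 *		goto out;
 */
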
/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for
 *  unicast 4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct
	 * route for this client. The destination will receive this packet
	 * and will try to reroute it because the ttvn contained in the
	 * header is less than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = NET_XMIT_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_put(orig_node);
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}

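/* Note for callers (illustrative summary, not part of the original file):
 * batadv_send_skb_unicast() consumes one reference to orig_node and, on
 * NET_XMIT_DROP, the skb as well. The two lookup helpers below therefore
 * simply hand over whatever batadv_transtable_search() or
 * batadv_gw_get_selected_orig() returned - including NULL.
 */
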
/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for
 *  unicast 4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the corresponding destination node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, let's send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
				       packet_subtype, orig_node, vid);
}

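/* Usage sketch (illustrative only, not part of the original file): send.h
 * builds thin inline wrappers on top of this helper, along the lines of:
 *
 *	static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
 *						 struct sk_buff *skb,
 *						 u8 *dst_hint,
 *						 unsigned short vid)
 *	{
 *		return batadv_send_skb_via_tt_generic(bat_priv, skb,
 *						      BATADV_UNICAST, 0,
 *						      dst_hint, vid);
 *	}
 */
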
/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a
 * batman-adv unicast header and send this frame to this gateway node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}

void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_put(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_put(forw_packet->if_outgoing);
	kfree(forw_packet);
}

static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/**
 * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 *
 * Add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;
	forw_packet->if_outgoing = NULL;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return NETDEV_TX_BUSY;
}

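/* Usage sketch (illustrative only, not part of the original file): because
 * the skb is copied rather than consumed, a typical caller still owns and
 * frees its own buffer:
 *
 *	if (batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay) !=
 *	    NETDEV_TX_OK)
 *		goto dropped;
 *	consume_skb(skb);
 */
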
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_broadcast_skb(skb1, hard_iface);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake-up time unless we are shutting down.
	 *
	 * only re-schedule if this is the "original" copy, e.g. the OGM of the
	 * primary interface should only be rescheduled once per period, but
	 * this function will be called for the forw_packet instances of the
	 * other secondary interfaces as well.
	 */
	if (forw_packet->own &&
	    forw_packet->if_incoming == forw_packet->if_outgoing)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if (hard_iface &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if (hard_iface &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
709}