/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "originator.h"
#include "network-coding.h"
#include "fragmentation.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
			ret = NET_XMIT_SUCCESS;

		goto out;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or
	 * if network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = NET_XMIT_SUCCESS;
	}

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);

	return ret;
}

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Returns false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet.
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype,
				   struct batadv_orig_node *orig_node,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_unicast_packet *unicast_packet;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct
	 * route for this client. The destination will receive this packet
	 * and will try to reroute it because the ttvn contained in the
	 * header is less than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = NET_XMIT_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the corresponding destination node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, uint8_t *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	uint8_t *src, *dst;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
				       packet_subtype, orig_node, vid);
}

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a
 * batman-adv unicast header and send this frame to this gateway node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}

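/**
 * batadv_schedule_bat_ogm - schedule an OGM for transmission
 * @hard_iface: the interface to send the OGM on
 *
 * Activates the interface if it was still pending activation and hands the
 * scheduling of the next OGM over to the routing algorithm in use.
 */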
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

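/**
 * batadv_forw_packet_free - free a forwarding packet and its resources
 * @forw_packet: the packet to free
 *
 * Frees the queued skb (if any), drops the references held on the incoming
 * and outgoing hard interfaces and frees the forw_packet structure itself.
 */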
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_free_ref(forw_packet->if_outgoing);
	kfree(forw_packet);
}

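/**
 * _batadv_add_bcast_packet_to_list - queue a prepared broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the pre-built forwarding packet to queue
 * @send_time: delay (in jiffies) before the packet is transmitted
 *
 * Adds the packet to the broadcast queue and arms its delayed work.
 */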
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;
	forw_packet->if_outgoing = NULL;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}

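/**
 * batadv_send_outstanding_bcast_packet - rebroadcast a queued packet
 * @work: the delayed work embedded in the forwarding packet
 *
 * Dequeues the broadcast packet and sends a clone of it on each hard
 * interface belonging to this soft interface that has not yet reached its
 * rebroadcast limit, then re-queues itself until all rebroadcasts are done.
 */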
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

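/**
 * batadv_send_outstanding_bat_ogm_packet - emit a queued OGM
 * @work: the delayed work embedded in the forwarding packet
 *
 * Dequeues the OGM and hands it to the routing algorithm for emission. The
 * node's own "original" OGM additionally triggers the scheduling of the
 * next OGM interval.
 */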
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake-up time unless we are shutting down.
	 *
	 * only re-schedule if this is the "original" copy, e.g. the OGM of the
	 * primary interface should only be rescheduled once per period, but
	 * this function will be called for the forw_packet instances of the
	 * other secondary interfaces as well.
	 */
	if (forw_packet->own &&
	    forw_packet->if_incoming == forw_packet->if_outgoing)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

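/**
 * batadv_purge_outstanding_packets - stop and free queued packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: restrict the purge to packets for this interface
 *  (NULL purges all queued packets)
 *
 * Cancels the delayed work of every matching packet in the broadcast and
 * OGM queues and frees those whose transmission was still pending.
 */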
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}