/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "originator.h"
#include "network-coding.h"
#include "fragmentation.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push space for the ethernet header */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

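/* Illustrative usage sketch (not part of the original file): the caller is
 * expected to hand over an skb whose batman-adv header is already complete;
 * this helper only prepends the ethernet header and consumes the skb in
 * every case. A hypothetical call could look like:
 *
 *	ret = batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
 *	if (ret != NET_XMIT_SUCCESS)
 *		;	// dropped by qdisc or interface; skb already freed
 */
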
/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
			ret = NET_XMIT_SUCCESS;

		goto out;
	}

	/* try to network code the packet if it was received on an interface
	 * (i.e. it is being forwarded). If the packet originates from this
	 * node or if network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = NET_XMIT_SUCCESS;
	}

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);

	return ret;
}

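/* Sketch of how a forwarding path might interpret the return codes above
 * (illustrative only, not taken from this file):
 *
 *	switch (batadv_send_skb_to_orig(skb, orig_node, recv_if)) {
 *	case NET_XMIT_POLICED:
 *		break;	// skb buffered by network coding, sent later
 *	case NET_XMIT_DROP:
 *		break;	// no router found or transmit failed
 *	default:
 *		break;	// NET_XMIT_SUCCESS: handed to the hard interface
 *	}
 */
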
/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Returns false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->header.version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->header.packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->header.ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

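/* After a successful call the skb starts with a fully initialized unicast
 * header; a minimal sketch of the resulting view of skb->data (field order
 * assumed from packet.h):
 *
 *	struct batadv_unicast_packet *up;
 *
 *	up = (struct batadv_unicast_packet *)skb->data;
 *	// up->header = { BATADV_UNICAST, BATADV_COMPAT_VERSION, BATADV_TTL }
 *	// up->dest   = orig_node->orig, up->ttvn = orig_node's last ttvn
 */
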
/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Push the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet.
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
	memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}

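/* Hedged usage sketch: a caller such as the DAT code wraps a payload with a
 * 4addr header by picking a subtype first (BATADV_P_DAT_DHT_GET is assumed
 * here from packet.h) and freeing the skb itself on failure:
 *
 *	if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, orig,
 *						   BATADV_P_DAT_DHT_GET))
 *		kfree_skb(skb);
 */
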
/**
 * batadv_send_skb_generic_unicast - send an skb as unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for
 *  unicast 4addr packets)
 *
 * Returns 1 in case of error or 0 otherwise.
 */
int batadv_send_skb_generic_unicast(struct batadv_priv *bat_priv,
				    struct sk_buff *skb, int packet_type,
				    int packet_subtype)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_unicast_packet *unicast_packet;
	struct batadv_orig_node *orig_node;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_RX_DROP;

	/* get routing information */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		orig_node = batadv_gw_get_selected_orig(bat_priv);
		if (orig_node)
			goto find_router;
	}

	/* check for tt host - increases orig_node refcount.
	 * returns NULL in case of AP isolation
	 */
	orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
					     ethhdr->h_dest);

find_router:
	/* batadv_find_router():
	 * - if orig_node is NULL it returns NULL
	 * - increases neigh_node's refcount if found
	 */
	neigh_node = batadv_find_router(bat_priv, orig_node, NULL);

	if (!neigh_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		batadv_send_skb_prepare_unicast(skb, orig_node);
		break;
	case BATADV_UNICAST_4ADDR:
		batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, orig_node,
						      packet_subtype);
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct
	 * route for this client. The destination will receive this packet
	 * and will try to reroute it because the ttvn contained in the
	 * header is less than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = 0;

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (ret == NET_RX_DROP)
		kfree_skb(skb);
	return ret;
}

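/* Illustrative call from a transmit path (not taken from this file): for
 * plain unicast the subtype is unused by the switch above, so 0 can be
 * passed:
 *
 *	if (batadv_send_skb_generic_unicast(bat_priv, skb, BATADV_UNICAST,
 *					    0) != 0)
 *		;	// skb was already freed on the error path above
 */
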
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid a race condition
	 * between the moment the interface is activated in
	 * hardif_activate_interface() (where the originator mac is set)
	 * and outdated packets (especially with uninitialized mac
	 * addresses) still sitting in the packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

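/* Note: send_time is a relative delay in jiffies handed straight to
 * queue_delayed_work(), so callers convert from milliseconds, e.g.:
 *
 *	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
 *					 msecs_to_jiffies(5));
 */
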
/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}

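/* Because the skb is duplicated with skb_copy() rather than consumed, a
 * caller keeps ownership of its own reference whatever the outcome; a
 * minimal sketch (illustrative only, delay of one jiffy assumed):
 *
 *	if (batadv_add_bcast_packet_to_list(bat_priv, skb, 1) == NETDEV_TX_OK)
 *		;	// a private copy is now queued for (re)broadcast
 *	consume_skb(skb);
 */
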
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

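/* Timing sketch derived from the worker above: a broadcast queued with
 * delay D is cloned onto each suitable interface at roughly t = D,
 * D + 5ms, D + 10ms, ... until num_packets reaches BATADV_NUM_BCASTS_MAX
 * (or the per-interface num_bcasts limit skips an interface), after which
 * the forw_packet is freed and the queue slot is returned.
 */
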
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue
	 * to determine the queue's wake up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the
		 * list to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}