/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"
#include "bat_ogm.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
		    const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warning("Interface %s is not up - can't send packet via "
			   "that interface!\n", hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */

	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

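/* reallocate the OGM packet buffer of the given interface to new_len bytes
 * while preserving the first BATMAN_OGM_LEN bytes; the old buffer is kept
 * if the allocation fails */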
static void realloc_packet_buffer(struct hard_iface *hard_iface,
				  int new_len)
{
	unsigned char *new_buff;

	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, hard_iface->packet_buff,
		       BATMAN_OGM_LEN);

		kfree(hard_iface->packet_buff);
		hard_iface->packet_buff = new_buff;
		hard_iface->packet_len = new_len;
	}
}

/* the caller of this function has to ensure that hard_iface == primary_if */
static int prepare_packet_buffer(struct bat_priv *bat_priv,
				 struct hard_iface *hard_iface)
{
	int new_len;

	new_len = BATMAN_OGM_LEN +
		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));

	/* if we have too many changes for one packet don't send any
	 * and wait for the tt table request which will be fragmented */
	if (new_len > hard_iface->soft_iface->mtu)
		new_len = BATMAN_OGM_LEN;

	realloc_packet_buffer(hard_iface, new_len);

	atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));

	/* reset the sending counter */
	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);

	return tt_changes_fill_buffer(bat_priv,
				      hard_iface->packet_buff + BATMAN_OGM_LEN,
				      hard_iface->packet_len - BATMAN_OGM_LEN);
}

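/* shrink the packet buffer back to the plain OGM size (no tt changes
 * appended) and report zero tt changes */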
static int reset_packet_buffer(struct bat_priv *bat_priv,
			       struct hard_iface *hard_iface)
{
	realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN);
	return 0;
}

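/* prepare the OGM packet buffer of the given interface (appending the local
 * tt changes on the primary interface) and pass the number of appended
 * changes to bat_ogm_schedule() */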
void schedule_bat_ogm(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hard_iface *primary_if;
	int tt_num_changes = -1;

	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
	    (hard_iface->if_status == IF_TO_BE_REMOVED))
		return;

	/**
	 * the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() (where the originator mac is set) and
	 * outdated packets (especially those with uninitialized mac
	 * addresses) still sitting in the packet queue
	 */
	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
		hard_iface->if_status = IF_ACTIVE;

	primary_if = primary_if_get_selected(bat_priv);

	if (hard_iface == primary_if) {
		/* if at least one change happened */
		if (atomic_read(&bat_priv->tt_local_changes) > 0) {
			tt_commit_changes(bat_priv);
			tt_num_changes = prepare_packet_buffer(bat_priv,
							       hard_iface);
		}

		/* if the changes have been sent often enough */
		if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
			tt_num_changes = reset_packet_buffer(bat_priv,
							     hard_iface);
	}

	if (primary_if)
		hardif_free_ref(primary_if);

	bat_ogm_schedule(hard_iface, tt_num_changes);
}

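/* free a forw_packet and release the references it holds */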
static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

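/* enqueue the given broadcast packet and schedule its (re)transmission
 * after send_time jiffies */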
static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * error.
 *
 * The skb is not consumed, so the caller has to make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv,
			     const struct sk_buff *skb, unsigned long delay)
{
	struct hard_iface *primary_if = NULL;
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}

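/* delayed work callback: rebroadcast the queued packet on all hard
 * interfaces belonging to its soft interface and re-arm the timer until
 * the packet has been sent three times */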
static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct hard_iface *hard_iface;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, hard_iface, broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  ((5 * HZ) / 1000));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

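/* delayed work callback: emit the queued OGM and, if it is one of our own
 * OGMs, schedule the next one on the incoming interface */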
void send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	bat_ogm_emit(forw_packet);

	/**
	 * we have to have at least one packet in the queue to determine the
	 * queue's wake-up time, unless we are shutting down
	 */
	if (forw_packet->own)
		schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}

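/* cancel and free all scheduled broadcast and OGM packets - if hard_iface
 * is given, only packets scheduled on that interface are purged */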
void purge_outstanding_packets(struct bat_priv *bat_priv,
			       const struct hard_iface *hard_iface)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			hard_iface->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/**
		 * send_outstanding_bat_ogm_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}