/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
                    const uint8_t *dst_addr)
{
        struct ethhdr *ethhdr;

        if (hard_iface->if_status != IF_ACTIVE)
                goto send_skb_err;

        if (unlikely(!hard_iface->net_dev))
                goto send_skb_err;

        if (!(hard_iface->net_dev->flags & IFF_UP)) {
                pr_warning("Interface %s is not up - can't send packet via that interface!\n",
                           hard_iface->net_dev->name);
                goto send_skb_err;
        }

        /* push to the ethernet header. */
        if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
                goto send_skb_err;

        skb_reset_mac_header(skb);

        ethhdr = (struct ethhdr *)skb_mac_header(skb);
        memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
        ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

        skb_set_network_header(skb, ETH_HLEN);
        skb->priority = TC_PRIO_CONTROL;
        skb->protocol = __constant_htons(ETH_P_BATMAN);

        skb->dev = hard_iface->net_dev;

        /* dev_queue_xmit() returns a negative result on error. However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
         * (which is > 0). This will not be treated as an error. */

        return dev_queue_xmit(skb);
send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

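/* usage sketch: send_skb_packet() consumes the skb on both the success
 * and the error path, so a caller that wants to keep its buffer must
 * hand over a copy. The rebroadcast loop in
 * send_outstanding_bcast_packet() below does exactly that:
 *
 *      skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
 *      if (skb1)
 *              send_skb_packet(skb1, hard_iface, broadcast_addr);
 */
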
static void realloc_packet_buffer(struct hard_iface *hard_iface,
                                  int new_len)
{
        unsigned char *new_buff;

        new_buff = kmalloc(new_len, GFP_ATOMIC);

        /* keep old buffer if kmalloc should fail */
        if (new_buff) {
                memcpy(new_buff, hard_iface->packet_buff,
                       BATMAN_OGM_LEN);

                kfree(hard_iface->packet_buff);
                hard_iface->packet_buff = new_buff;
                hard_iface->packet_len = new_len;
        }
}

/* when calling this function (hard_iface == primary_if) has to be true */
static int prepare_packet_buffer(struct bat_priv *bat_priv,
                                 struct hard_iface *hard_iface)
{
        int new_len;

        new_len = BATMAN_OGM_LEN +
                  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));

        /* if we have too many changes for one packet don't send any
         * and wait for the tt table request which will be fragmented */
        if (new_len > hard_iface->soft_iface->mtu)
                new_len = BATMAN_OGM_LEN;

        realloc_packet_buffer(hard_iface, new_len);

        atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));

        /* reset the sending counter */
        atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);

        return tt_changes_fill_buffer(bat_priv,
                                      hard_iface->packet_buff + BATMAN_OGM_LEN,
                                      hard_iface->packet_len - BATMAN_OGM_LEN);
}

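/* sizing example (numbers assumed for illustration only): with a
 * 1500 byte soft-interface mtu, the OGM plus the appended tt changes
 * must fit into a single packet. If enough local changes are pending
 * that new_len would exceed the mtu, the buffer is shrunk back to a
 * bare BATMAN_OGM_LEN: the OGM then carries no changes at all and the
 * receivers recover the table via a (fragmented) tt request instead,
 * as the comment above notes. */
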
static int reset_packet_buffer(struct bat_priv *bat_priv,
                               struct hard_iface *hard_iface)
{
        realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN);
        return 0;
}

void schedule_bat_ogm(struct hard_iface *hard_iface)
{
        struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct hard_iface *primary_if;
        int tt_num_changes = -1;

        if ((hard_iface->if_status == IF_NOT_IN_USE) ||
            (hard_iface->if_status == IF_TO_BE_REMOVED))
                return;

        /**
         * the interface gets activated here to avoid race conditions
         * between the moment the interface is activated in
         * hardif_activate_interface() (where the originator mac is set)
         * and outdated packets (especially with uninitialized mac
         * addresses) still sitting in the packet queue
         */
        if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
                hard_iface->if_status = IF_ACTIVE;

        primary_if = primary_if_get_selected(bat_priv);

        if (hard_iface == primary_if) {
                /* if at least one change happened */
                if (atomic_read(&bat_priv->tt_local_changes) > 0) {
                        tt_commit_changes(bat_priv);
                        tt_num_changes = prepare_packet_buffer(bat_priv,
                                                               hard_iface);
                }

                /* if the changes have been sent often enough */
                if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
                        tt_num_changes = reset_packet_buffer(bat_priv,
                                                             hard_iface);
        }

        if (primary_if)
                hardif_free_ref(primary_if);

        bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes);
}

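/* note: the tt_num_changes value handed to bat_ogm_schedule() above
 * encodes what happened to the packet buffer: it stays -1 when the
 * buffer was left untouched, is the number of tt changes appended by
 * prepare_packet_buffer(), and is 0 after reset_packet_buffer() shrank
 * the buffer back to a bare OGM. */
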
static void forw_packet_free(struct forw_packet *forw_packet)
{
        if (forw_packet->skb)
                kfree_skb(forw_packet->skb);
        if (forw_packet->if_incoming)
                hardif_free_ref(forw_packet->if_incoming);
        kfree(forw_packet);
}

static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
                                      struct forw_packet *forw_packet,
                                      unsigned long send_time)
{
        INIT_HLIST_NODE(&forw_packet->list);

        /* add new packet to packet list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* start timer for this packet */
        INIT_DELAYED_WORK(&forw_packet->delayed_work,
                          send_outstanding_bcast_packet);
        queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
                           send_time);
}

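/* send_time is relative and in jiffies. A hypothetical caller thinking
 * in milliseconds would convert first:
 *
 *      _add_bcast_packet_to_list(bat_priv, forw_packet,
 *                                msecs_to_jiffies(5));
 *
 * send_outstanding_bcast_packet() below passes ((5 * HZ) / 1000)
 * instead, the same 5ms written out by hand (with the difference that
 * the open-coded form truncates while msecs_to_jiffies() rounds up).
 */
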
/* add a broadcast packet to the queue and set up timers. broadcast
 * packets are sent multiple times to increase the probability of being
 * received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv,
                             const struct sk_buff *skb, unsigned long delay)
{
        struct hard_iface *primary_if = NULL;
        struct forw_packet *forw_packet;
        struct bcast_packet *bcast_packet;
        struct sk_buff *newskb;

        if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
                bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
                goto out;
        }

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out_and_inc;

        forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

        if (!forw_packet)
                goto out_and_inc;

        newskb = skb_copy(skb, GFP_ATOMIC);
        if (!newskb)
                goto packet_free;

        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct bcast_packet *)newskb->data;
        bcast_packet->header.ttl--;

        skb_reset_mac_header(newskb);

        forw_packet->skb = newskb;
        forw_packet->if_incoming = primary_if;

        /* how often did we send the bcast packet ? */
        forw_packet->num_packets = 0;

        _add_bcast_packet_to_list(bat_priv, forw_packet, delay);
        return NETDEV_TX_OK;

packet_free:
        kfree(forw_packet);
out_and_inc:
        atomic_inc(&bat_priv->bcast_queue_left);
out:
        if (primary_if)
                hardif_free_ref(primary_if);
        return NETDEV_TX_BUSY;
}

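/* usage sketch (hypothetical caller, e.g. a tx path that classified
 * the frame as broadcast): only a private copy is queued, so the
 * caller keeps ownership of its skb and must free it itself, whatever
 * the return value; the delay of 1 jiffy is illustrative only:
 *
 *      ret = add_bcast_packet_to_list(bat_priv, skb, 1);
 *      kfree_skb(skb);
 */
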
static void send_outstanding_bcast_packet(struct work_struct *work)
{
        struct hard_iface *hard_iface;
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
                container_of(delayed_work, struct forw_packet, delayed_work);
        struct sk_buff *skb1;
        struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
        struct bat_priv *bat_priv = netdev_priv(soft_iface);

        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
                goto out;

        /* rebroadcast packet */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
                if (hard_iface->soft_iface != soft_iface)
                        continue;

                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
                        send_skb_packet(skb1, hard_iface, broadcast_addr);
        }
        rcu_read_unlock();

        forw_packet->num_packets++;

        /* if we still have some more bcasts to send */
        if (forw_packet->num_packets < 3) {
                _add_bcast_packet_to_list(bat_priv, forw_packet,
                                          ((5 * HZ) / 1000));
                return;
        }

out:
        forw_packet_free(forw_packet);
        atomic_inc(&bat_priv->bcast_queue_left);
}

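/* note: num_packets < 3 above means every broadcast is sent three
 * times per interface, roughly 5ms apart, before the queue slot in
 * bcast_queue_left is released again. */
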
void send_outstanding_bat_ogm_packet(struct work_struct *work)
{
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
                container_of(delayed_work, struct forw_packet, delayed_work);
        struct bat_priv *bat_priv;

        bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
                goto out;

        bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

        /**
         * we have to have at least one packet in the queue
         * to determine the queue's wake up time unless we are
         * shutting down
         */
        if (forw_packet->own)
                schedule_bat_ogm(forw_packet->if_incoming);

out:
        /* don't count own packet */
        if (!forw_packet->own)
                atomic_inc(&bat_priv->batman_queue_left);

        forw_packet_free(forw_packet);
}

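/* note: an own OGM reschedules its successor via schedule_bat_ogm()
 * right after being emitted, so the forw_bat_list never runs dry while
 * an interface is active. The periodic OGM emission is driven entirely
 * by this chain of delayed work items rather than by a dedicated
 * timer. */
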
void purge_outstanding_packets(struct bat_priv *bat_priv,
                               const struct hard_iface *hard_iface)
{
        struct forw_packet *forw_packet;
        struct hlist_node *tmp_node, *safe_tmp_node;
        bool pending;

        if (hard_iface)
                bat_dbg(DBG_BATMAN, bat_priv,
                        "purge_outstanding_packets(): %s\n",
                        hard_iface->net_dev->name);
        else
                bat_dbg(DBG_BATMAN, bat_priv,
                        "purge_outstanding_packets()\n");

        /* free bcast list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bcast_list, list) {

                /**
                 * if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

                /**
                 * send_outstanding_bcast_packet() will lock the list to
                 * delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bcast_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* free batman packet list */
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bat_list, list) {

                /**
                 * if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bat_list_lock);

                /**
                 * send_outstanding_bat_ogm_packet() will lock the list to
                 * delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bat_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
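
/* note on the cancel pattern above: the list lock has to be dropped
 * before cancel_delayed_work_sync() because the work callbacks take
 * the same lock to unlink themselves; holding it across the sync
 * cancel would deadlock. A work item that already ran has removed and
 * freed itself, hence the "if (pending)" check before touching
 * forw_packet again. */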