/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However,
	 * under congestion and traffic shaping it drops the packet and
	 * returns NET_XMIT_DROP (which is > 0). This is not treated as
	 * an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
void batadv_schedule_bat_ogm(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
	    (hard_iface->if_status == IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions
	 * between the moment of activating the interface in
	 * hardif_activate_interface(), where the originator mac is set,
	 * and outdated packets (especially with uninitialized mac
	 * addresses) in the packet queue
	 */
	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
		hard_iface->if_status = IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}
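
/* free a forwarding packet: drop the skb (if any) and release the
 * reference held on the incoming interface
 */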
static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}
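
/* enqueue the broadcast packet on the bcast list and arm its delayed
 * work; send_time is the delay in jiffies until the (re)broadcast
 */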
static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast
 * packets are sent multiple times to increase the probability of being
 * received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct bat_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct hard_iface *primary_if = NULL;
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
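
/* Example caller pattern for batadv_add_bcast_packet_to_list() (a
 * sketch; "skb" is a broadcast packet prepared by the caller, and the
 * one-jiffy delay is an assumption, not a value mandated here):
 *
 *	batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
 *	kfree_skb(skb);
 *
 * The queue keeps its own copy (skb_copy() above), so the caller frees
 * the original skb regardless of the return value.
 */

/* delayed work callback: rebroadcast the queued packet on every hard
 * interface belonging to its soft interface, then requeue it until it
 * has been sent three times in total
 */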
static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct hard_iface *hard_iface;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  msecs_to_jiffies(5));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
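
/* delayed work callback: let the routing algorithm emit the scheduled
 * OGM and, if it was one of our own, schedule the next one
 */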
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine
	 * the queue's wake-up time unless we are shutting down
	 */
	if (forw_packet->own)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}
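
/* cancel and free all queued broadcast and OGM packets; with a
 * non-NULL hard_iface only the packets scheduled on that interface are
 * purged, e.g. when a single interface is being removed
 */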
void batadv_purge_outstanding_packets(struct bat_priv *bat_priv,
				      const struct hard_iface *hard_iface)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			hard_iface->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/* if purge_outstanding_packets() was called with an
		 * argument, we delete only packets belonging to the
		 * given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/* if purge_outstanding_packets() was called with an
		 * argument, we delete only packets belonging to the
		 * given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the
		 * list to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}