/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
		    const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warning("Interface %s is not up - can't send packet via that interface!\n",
			   hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

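	/* the batman payload starts right after the new ethernet header;
	 * TC_PRIO_CONTROL is the highest traffic control priority band,
	 * so this control traffic is not starved by regular payload */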
	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */

	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

static void realloc_packet_buffer(struct hard_iface *hard_iface,
				  int new_len)
{
	unsigned char *new_buff;

	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep the old buffer if kmalloc fails */
	if (new_buff) {
		memcpy(new_buff, hard_iface->packet_buff,
		       BATMAN_OGM_LEN);

		kfree(hard_iface->packet_buff);
		hard_iface->packet_buff = new_buff;
		hard_iface->packet_len = new_len;
	}
}

/* this function may only be called when hard_iface is the primary
 * interface (hard_iface == primary_if) */
static int prepare_packet_buffer(struct bat_priv *bat_priv,
				 struct hard_iface *hard_iface)
{
	int new_len;

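	/* the OGM header is followed by the serialized local translation
	 * table changes - tt_len() converts the change count into bytes */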
	new_len = BATMAN_OGM_LEN +
		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));

	/* if we have too many changes for one packet, don't send any of
	 * them and wait for the tt table request, which will be
	 * fragmented */
	if (new_len > hard_iface->soft_iface->mtu)
		new_len = BATMAN_OGM_LEN;

	realloc_packet_buffer(hard_iface, new_len);

	atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));

	/* reset the sending counter */
	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);

	return tt_changes_fill_buffer(bat_priv,
				      hard_iface->packet_buff + BATMAN_OGM_LEN,
				      hard_iface->packet_len - BATMAN_OGM_LEN);
}

static int reset_packet_buffer(struct bat_priv *bat_priv,
			       struct hard_iface *hard_iface)
{
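	/* shrink the buffer back to a bare OGM - no tt changes appended */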
	realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN);
	return 0;
}

void schedule_bat_ogm(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hard_iface *primary_if;
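	/* -1 means no tt change data has been prepared for this OGM */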
	int tt_num_changes = -1;

	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
	    (hard_iface->if_status == IF_TO_BE_REMOVED))
		return;

	/*
	 * the interface gets activated here to avoid race conditions
	 * between the moment the interface is activated in
	 * hardif_activate_interface(), where the originator mac is set,
	 * and outdated packets (especially ones with uninitialized mac
	 * addresses) still sitting in the packet queue
	 */
	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
		hard_iface->if_status = IF_ACTIVE;

	primary_if = primary_if_get_selected(bat_priv);

	if (hard_iface == primary_if) {
		/* if at least one change happened */
		if (atomic_read(&bat_priv->tt_local_changes) > 0) {
			tt_commit_changes(bat_priv);
			tt_num_changes = prepare_packet_buffer(bat_priv,
							       hard_iface);
		}

		/* if the changes have been sent often enough */
		if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
			tt_num_changes = reset_packet_buffer(bat_priv,
							     hard_iface);
	}

	if (primary_if)
		hardif_free_ref(primary_if);

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes);
}

static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv,
			     const struct sk_buff *skb, unsigned long delay)
{
	struct hard_iface *primary_if = NULL;
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;
	struct sk_buff *newskb;

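	/* reserve a slot in the broadcast queue - the counter is
	 * incremented again once the packet has left the queue or
	 * enqueueing fails */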
	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}

static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct hard_iface *hard_iface;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, hard_iface, broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

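	/* each broadcast is transmitted three times, roughly 5ms apart
	 * (subject to jiffy resolution), to increase the delivery
	 * probability on lossy links */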
	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  ((5 * HZ) / 1000));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

void send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

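	/* hand the packet over to the configured routing algorithm
	 * (e.g. B.A.T.M.A.N. IV), which performs the actual emission */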
	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/*
	 * we have to have at least one packet in the queue
	 * to determine the queue's wake-up time, unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}

void purge_outstanding_packets(struct bat_priv *bat_priv,
			       const struct hard_iface *hard_iface)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			hard_iface->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/*
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/*
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

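		/* cancel_delayed_work_sync() returns true if the work item
		 * was still pending: it never ran, so we have to unlink and
		 * free the packet ourselves */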
		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/*
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/*
		 * send_outstanding_bat_ogm_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}