Sven Eckelmann | c6c8fea | 2010-12-13 11:19:28 +0000 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2010 B.A.T.M.A.N. contributors: |
| 3 | * |
| 4 | * Andreas Langer |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of version 2 of the GNU General Public |
| 8 | * License as published by the Free Software Foundation. |
| 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, but |
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 13 | * General Public License for more details. |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License |
| 16 | * along with this program; if not, write to the Free Software |
| 17 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
| 18 | * 02110-1301, USA |
| 19 | * |
| 20 | */ |
| 21 | |
| 22 | #include "main.h" |
| 23 | #include "unicast.h" |
| 24 | #include "send.h" |
| 25 | #include "soft-interface.h" |
| 26 | #include "gateway_client.h" |
| 27 | #include "originator.h" |
| 28 | #include "hash.h" |
| 29 | #include "translation-table.h" |
| 30 | #include "routing.h" |
| 31 | #include "hard-interface.h" |
| 32 | |
| 33 | |
| 34 | static struct sk_buff *frag_merge_packet(struct list_head *head, |
| 35 | struct frag_packet_list_entry *tfp, |
| 36 | struct sk_buff *skb) |
| 37 | { |
| 38 | struct unicast_frag_packet *up = |
| 39 | (struct unicast_frag_packet *)skb->data; |
| 40 | struct sk_buff *tmp_skb; |
| 41 | struct unicast_packet *unicast_packet; |
| 42 | int hdr_len = sizeof(struct unicast_packet), |
| 43 | uni_diff = sizeof(struct unicast_frag_packet) - hdr_len; |
| 44 | |
| 45 | /* set skb to the first part and tmp_skb to the second part */ |
| 46 | if (up->flags & UNI_FRAG_HEAD) { |
| 47 | tmp_skb = tfp->skb; |
| 48 | } else { |
| 49 | tmp_skb = skb; |
| 50 | skb = tfp->skb; |
| 51 | } |
| 52 | |
| 53 | skb_pull(tmp_skb, sizeof(struct unicast_frag_packet)); |
| 54 | if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) { |
| 55 | /* free buffered skb, skb will be freed later */ |
| 56 | kfree_skb(tfp->skb); |
| 57 | return NULL; |
| 58 | } |
| 59 | |
| 60 | /* move free entry to end */ |
| 61 | tfp->skb = NULL; |
| 62 | tfp->seqno = 0; |
| 63 | list_move_tail(&tfp->list, head); |
| 64 | |
| 65 | memcpy(skb_put(skb, tmp_skb->len), tmp_skb->data, tmp_skb->len); |
| 66 | kfree_skb(tmp_skb); |
| 67 | |
| 68 | memmove(skb->data + uni_diff, skb->data, hdr_len); |
| 69 | unicast_packet = (struct unicast_packet *) skb_pull(skb, uni_diff); |
| 70 | unicast_packet->packet_type = BAT_UNICAST; |
| 71 | |
| 72 | return skb; |
| 73 | } |
| 74 | |
| 75 | static void frag_create_entry(struct list_head *head, struct sk_buff *skb) |
| 76 | { |
| 77 | struct frag_packet_list_entry *tfp; |
| 78 | struct unicast_frag_packet *up = |
| 79 | (struct unicast_frag_packet *)skb->data; |
| 80 | |
| 81 | /* free and oldest packets stand at the end */ |
| 82 | tfp = list_entry((head)->prev, typeof(*tfp), list); |
| 83 | kfree_skb(tfp->skb); |
| 84 | |
| 85 | tfp->seqno = ntohs(up->seqno); |
| 86 | tfp->skb = skb; |
| 87 | list_move(&tfp->list, head); |
| 88 | return; |
| 89 | } |
| 90 | |
| 91 | static int frag_create_buffer(struct list_head *head) |
| 92 | { |
| 93 | int i; |
| 94 | struct frag_packet_list_entry *tfp; |
| 95 | |
| 96 | for (i = 0; i < FRAG_BUFFER_SIZE; i++) { |
| 97 | tfp = kmalloc(sizeof(struct frag_packet_list_entry), |
| 98 | GFP_ATOMIC); |
| 99 | if (!tfp) { |
| 100 | frag_list_free(head); |
| 101 | return -ENOMEM; |
| 102 | } |
| 103 | tfp->skb = NULL; |
| 104 | tfp->seqno = 0; |
| 105 | INIT_LIST_HEAD(&tfp->list); |
| 106 | list_add(&tfp->list, head); |
| 107 | } |
| 108 | |
| 109 | return 0; |
| 110 | } |
| 111 | |
| 112 | static struct frag_packet_list_entry *frag_search_packet(struct list_head *head, |
| 113 | struct unicast_frag_packet *up) |
| 114 | { |
| 115 | struct frag_packet_list_entry *tfp; |
| 116 | struct unicast_frag_packet *tmp_up = NULL; |
| 117 | uint16_t search_seqno; |
| 118 | |
| 119 | if (up->flags & UNI_FRAG_HEAD) |
| 120 | search_seqno = ntohs(up->seqno)+1; |
| 121 | else |
| 122 | search_seqno = ntohs(up->seqno)-1; |
| 123 | |
| 124 | list_for_each_entry(tfp, head, list) { |
| 125 | |
| 126 | if (!tfp->skb) |
| 127 | continue; |
| 128 | |
| 129 | if (tfp->seqno == ntohs(up->seqno)) |
| 130 | goto mov_tail; |
| 131 | |
| 132 | tmp_up = (struct unicast_frag_packet *)tfp->skb->data; |
| 133 | |
| 134 | if (tfp->seqno == search_seqno) { |
| 135 | |
| 136 | if ((tmp_up->flags & UNI_FRAG_HEAD) != |
| 137 | (up->flags & UNI_FRAG_HEAD)) |
| 138 | return tfp; |
| 139 | else |
| 140 | goto mov_tail; |
| 141 | } |
| 142 | } |
| 143 | return NULL; |
| 144 | |
| 145 | mov_tail: |
| 146 | list_move_tail(&tfp->list, head); |
| 147 | return NULL; |
| 148 | } |
| 149 | |
| 150 | void frag_list_free(struct list_head *head) |
| 151 | { |
| 152 | struct frag_packet_list_entry *pf, *tmp_pf; |
| 153 | |
| 154 | if (!list_empty(head)) { |
| 155 | |
| 156 | list_for_each_entry_safe(pf, tmp_pf, head, list) { |
| 157 | kfree_skb(pf->skb); |
| 158 | list_del(&pf->list); |
| 159 | kfree(pf); |
| 160 | } |
| 161 | } |
| 162 | return; |
| 163 | } |
| 164 | |
/* frag_reassemble_skb():
 * returns NET_RX_DROP if the operation failed - skb is left intact
 * returns NET_RX_SUCCESS if the fragment was buffered (skb_new will be NULL)
 * or the skb could be reassembled (skb_new will point to the new packet and
 * skb was freed)
 */
int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
			struct sk_buff **new_skb)
{
	struct orig_node *orig_node;
	struct frag_packet_list_entry *tmp_frag_entry;
	int ret = NET_RX_DROP;
	struct unicast_frag_packet *unicast_packet =
		(struct unicast_frag_packet *)skb->data;

	*new_skb = NULL;
	/* orig_hash_lock covers both the hash lookup and all accesses to
	 * the per-originator fragment list below */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       unicast_packet->orig));

	if (!orig_node) {
		pr_debug("couldn't find originator in orig_hash\n");
		goto out;
	}

	/* timestamp used elsewhere to purge stale fragment buffers -
	 * NOTE(review): purge logic not visible in this file */
	orig_node->last_frag_packet = jiffies;

	/* lazily allocate the fragment buffer on the first fragment
	 * received from this originator */
	if (list_empty(&orig_node->frag_list) &&
	    frag_create_buffer(&orig_node->frag_list)) {
		pr_debug("couldn't create frag buffer\n");
		goto out;
	}

	/* look for the already-buffered counterpart of this fragment */
	tmp_frag_entry = frag_search_packet(&orig_node->frag_list,
					    unicast_packet);

	/* no counterpart yet: buffer this fragment and report success */
	if (!tmp_frag_entry) {
		frag_create_entry(&orig_node->frag_list, skb);
		ret = NET_RX_SUCCESS;
		goto out;
	}

	*new_skb = frag_merge_packet(&orig_node->frag_list, tmp_frag_entry,
				     skb);
	/* if not, merge failed */
	if (*new_skb)
		ret = NET_RX_SUCCESS;
out:
	spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}
| 218 | |
/* frag_send_skb():
 * splits the unicast packet in @skb into two fragments and sends both to
 * @dstaddr via @batman_if. Consumes @skb on success and on failure.
 * returns NET_RX_SUCCESS or NET_RX_DROP */
int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
		  struct batman_if *batman_if, uint8_t dstaddr[])
{
	struct unicast_packet tmp_uc, *unicast_packet;
	struct sk_buff *frag_skb;
	struct unicast_frag_packet *frag1, *frag2;
	int uc_hdr_len = sizeof(struct unicast_packet);
	int ucf_hdr_len = sizeof(struct unicast_frag_packet);
	int data_len = skb->len - uc_hdr_len;
	int large_tail = 0;

	if (!bat_priv->primary_if)
		goto dropped;

	/* the tail fragment gets the larger half when data_len is odd:
	 * data_len - data_len / 2 bytes of payload */
	frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
	if (!frag_skb)
		goto dropped;
	/* leave headroom for the fragment header pushed below */
	skb_reserve(frag_skb, ucf_hdr_len);

	unicast_packet = (struct unicast_packet *) skb->data;
	/* save the original unicast header before skb_split/push clobber it */
	memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
	/* first half of the payload stays in skb, rest goes to frag_skb */
	skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);

	/* grow both headrooms to hold a full fragment header */
	if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
	    my_skb_head_push(frag_skb, ucf_hdr_len) < 0)
		goto drop_frag;

	frag1 = (struct unicast_frag_packet *)skb->data;
	frag2 = (struct unicast_frag_packet *)frag_skb->data;

	/* start both fragment headers from the saved unicast header */
	memcpy(frag1, &tmp_uc, sizeof(struct unicast_packet));

	/* the caller incremented ttl to compensate for this decrement */
	frag1->ttl--;
	frag1->version = COMPAT_VERSION;
	frag1->packet_type = BAT_UNICAST_FRAG;

	memcpy(frag1->orig, bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(frag2, frag1, sizeof(struct unicast_frag_packet));

	/* tell the receiver that the tail fragment is one byte larger */
	if (data_len & 1)
		large_tail = UNI_FRAG_LARGETAIL;

	frag1->flags = UNI_FRAG_HEAD | large_tail;
	frag2->flags = large_tail;

	/* consecutive seqnos let the receiver pair the two fragments */
	frag1->seqno = htons((uint16_t)atomic_inc_return(
						&batman_if->frag_seqno));
	frag2->seqno = htons((uint16_t)atomic_inc_return(
						&batman_if->frag_seqno));

	send_skb_packet(skb, batman_if, dstaddr);
	send_skb_packet(frag_skb, batman_if, dstaddr);
	return NET_RX_SUCCESS;

drop_frag:
	kfree_skb(frag_skb);
dropped:
	kfree_skb(skb);
	return NET_RX_DROP;
}
| 279 | |
/* unicast_send_skb():
 * wraps the ethernet frame in @skb into a batman-adv unicast packet and
 * hands it to the next hop towards the destination, fragmenting if the
 * result would exceed the outgoing interface's MTU.
 * returns 0 on success, 1 on drop; @skb is consumed in both cases */
int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct unicast_packet *unicast_packet;
	struct orig_node *orig_node;
	struct batman_if *batman_if;
	struct neigh_node *router;
	int data_len = skb->len;
	uint8_t dstaddr[6];

	spin_lock_bh(&bat_priv->orig_hash_lock);

	/* get routing information */
	if (is_multicast_ether_addr(ethhdr->h_dest))
		/* multicast frames are tunneled to the selected gateway */
		orig_node = (struct orig_node *)gw_get_selected(bat_priv);
	else
		orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
							   compare_orig,
							   choose_orig,
							   ethhdr->h_dest));

	/* check for hna host */
	if (!orig_node)
		orig_node = transtable_search(bat_priv, ethhdr->h_dest);

	/* NOTE(review): orig_node may still be NULL here - relies on
	 * find_router() tolerating that; confirm in routing.c */
	router = find_router(bat_priv, orig_node, NULL);

	if (!router)
		goto unlock;

	/* don't lock while sending the packets ... we therefore
	 * copy the required data before sending */

	batman_if = router->if_incoming;
	memcpy(dstaddr, router->addr, ETH_ALEN);

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	if (batman_if->if_status != IF_ACTIVE)
		goto dropped;

	/* make room for the unicast header in front of the payload */
	if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0)
		goto dropped;

	unicast_packet = (struct unicast_packet *)skb->data;

	unicast_packet->version = COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BAT_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = TTL;
	/* copy the destination for faster routing */
	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);

	/* fragment when the full packet would not fit into the MTU */
	if (atomic_read(&bat_priv->fragmentation) &&
	    data_len + sizeof(struct unicast_packet) >
	    batman_if->net_dev->mtu) {
		/* send frag skb decreases ttl */
		unicast_packet->ttl++;
		return frag_send_skb(skb, bat_priv, batman_if,
				     dstaddr);
	}
	send_skb_packet(skb, batman_if, dstaddr);
	return 0;

unlock:
	spin_unlock_bh(&bat_priv->orig_hash_lock);
dropped:
	kfree_skb(skb);
	return 1;
}