/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	UDPv6 GSO support
 */
| 12 | #include <linux/skbuff.h> |
| 13 | #include <net/protocol.h> |
| 14 | #include <net/ipv6.h> |
| 15 | #include <net/udp.h> |
Vlad Yasevich | d4d0d35 | 2012-11-15 16:35:37 +0000 | [diff] [blame] | 16 | #include <net/ip6_checksum.h> |
Vlad Yasevich | 5edbb07 | 2012-11-15 08:49:18 +0000 | [diff] [blame] | 17 | #include "ip6_offload.h" |
| 18 | |
| 19 | static int udp6_ufo_send_check(struct sk_buff *skb) |
| 20 | { |
| 21 | const struct ipv6hdr *ipv6h; |
| 22 | struct udphdr *uh; |
| 23 | |
| 24 | if (!pskb_may_pull(skb, sizeof(*uh))) |
| 25 | return -EINVAL; |
| 26 | |
Cong Wang | d949d82 | 2013-08-31 13:44:37 +0800 | [diff] [blame] | 27 | if (likely(!skb->encapsulation)) { |
| 28 | ipv6h = ipv6_hdr(skb); |
| 29 | uh = udp_hdr(skb); |
Vlad Yasevich | 5edbb07 | 2012-11-15 08:49:18 +0000 | [diff] [blame] | 30 | |
Cong Wang | d949d82 | 2013-08-31 13:44:37 +0800 | [diff] [blame] | 31 | uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, |
| 32 | IPPROTO_UDP, 0); |
| 33 | skb->csum_start = skb_transport_header(skb) - skb->head; |
| 34 | skb->csum_offset = offsetof(struct udphdr, check); |
| 35 | skb->ip_summed = CHECKSUM_PARTIAL; |
| 36 | } |
| 37 | |
Vlad Yasevich | 5edbb07 | 2012-11-15 08:49:18 +0000 | [diff] [blame] | 38 | return 0; |
| 39 | } |
| 40 | |
/* Segment a UFO (UDP fragmentation offload) skb in software.
 *
 * Returns the list of resulting segments, NULL when only gso_segs
 * needed recomputing (untrusted/dodgy source, no real segmentation),
 * or an ERR_PTR on failure.  The IPv6 header and the remaining
 * fragment-header fields of each segment are filled in afterwards by
 * ipv6_gso_segment().
 */
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	unsigned int unfrag_ip6hlen, unfrag_len;
	struct frag_hdr *fptr;
	u8 *packet_start, *prevhdr;
	u8 nexthdr;
	u8 frag_hdr_sz = sizeof(struct frag_hdr);
	int offset;
	__wsum csum;
	int tnl_hlen;

	/* Nothing to segment if the payload already fits in one MSS. */
	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		/* Reject unknown gso_type bits, and require UDP to be set. */
		if (unlikely(type & ~(SKB_GSO_UDP |
				      SKB_GSO_DODGY |
				      SKB_GSO_UDP_TUNNEL |
				      SKB_GSO_GRE |
				      SKB_GSO_IPIP |
				      SKB_GSO_SIT |
				      SKB_GSO_MPLS) ||
			     !(type & (SKB_GSO_UDP))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		/* NULL means "no segmentation performed" to the caller. */
		segs = NULL;
		goto out;
	}

	if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
		segs = skb_udp_tunnel_segment(skb, features);
	else {
		/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
		 * do checksum of UDP packets sent as multiple IP fragments.
		 */
		offset = skb_checksum_start_offset(skb);
		csum = skb_checksum(skb, offset, skb->len - offset, 0);
		offset += skb->csum_offset;
		*(__sum16 *)(skb->data + offset) = csum_fold(csum);
		skb->ip_summed = CHECKSUM_NONE;

		/* Check if there is enough headroom to insert fragment header. */
		tnl_hlen = skb_tnl_header_len(skb);
		if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
			if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
				goto out;
		}

		/* Find the unfragmentable header and shift it left by frag_hdr_sz
		 * bytes to insert fragment header.
		 */
		unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
		nexthdr = *prevhdr;
		*prevhdr = NEXTHDR_FRAGMENT;
		unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
			     unfrag_ip6hlen + tnl_hlen;
		packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
		memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);

		/* Everything moved down by frag_hdr_sz bytes: fix the
		 * cached header offsets to match.
		 */
		SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
		skb->mac_header -= frag_hdr_sz;
		skb->network_header -= frag_hdr_sz;

		/* Build the fragment header in the gap just opened up after
		 * the unfragmentable part.
		 */
		fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
		fptr->nexthdr = nexthdr;
		fptr->reserved = 0;
		fptr->identification = skb_shinfo(skb)->ip6_frag_id;

		/* Fragment the skb. ipv6 header and the remaining fields of the
		 * fragment header are updated in ipv6_gso_segment()
		 */
		segs = skb_segment(skb, features);
	}

out:
	return segs;
}
/* GSO callbacks registered for IPPROTO_UDP on the IPv6 offload path. */
static const struct net_offload udpv6_offload = {
	.callbacks = {
		.gso_send_check = udp6_ufo_send_check,
		.gso_segment = udp6_ufo_fragment,
	},
};
| 133 | |
| 134 | int __init udp_offload_init(void) |
| 135 | { |
| 136 | return inet6_add_offload(&udpv6_offload, IPPROTO_UDP); |
| 137 | } |