/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	UDPv6 GSO support
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"

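/* Segment an oversized UDP datagram in software (UDP fragmentation
 * offload, UFO): compute the UDP checksum over the full datagram,
 * insert an IPv6 fragment header behind the unfragmentable part, and
 * let skb_segment() split the payload. Encapsulated GSO types are
 * handed off to skb_udp_tunnel_segment() instead.
 */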
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	unsigned int unfrag_ip6hlen, unfrag_len;
	struct frag_hdr *fptr;
	u8 *packet_start, *prevhdr;
	u8 nexthdr;
	u8 frag_hdr_sz = sizeof(struct frag_hdr);
	__wsum csum;
	int tnl_hlen;
	int err;

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		/* Set the IPv6 fragment id if not set yet */
		if (!skb_shinfo(skb)->ip6_frag_id)
			ipv6_proxy_select_ident(dev_net(skb->dev), skb);

		segs = NULL;
		goto out;
	}

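	/* Encapsulated packets (e.g. VXLAN-style UDP tunnels) are
	 * segmented by the generic tunnel helper, which handles the
	 * outer headers; everything else takes the plain UFO path.
	 */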
	if (skb->encapsulation && skb_shinfo(skb)->gso_type &
	    (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))
		segs = skb_udp_tunnel_segment(skb, features, true);
	else {
		const struct ipv6hdr *ipv6h;
		struct udphdr *uh;

		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
			goto out;

		if (!pskb_may_pull(skb, sizeof(struct udphdr)))
			goto out;

		/* Do software UFO. Complete and fill in the UDP checksum
		 * here, as hardware cannot checksum a UDP datagram that is
		 * sent as multiple IP fragments.
		 */

		uh = udp_hdr(skb);
		ipv6h = ipv6_hdr(skb);

		uh->check = 0;
		csum = skb_checksum(skb, 0, skb->len, 0);
		uh->check = udp_v6_check(skb->len, &ipv6h->saddr,
					 &ipv6h->daddr, csum);
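		/* A computed checksum of zero is sent as all ones: on the
		 * wire, a zero UDP checksum means "not computed", which is
		 * not permitted over IPv6 (RFC 2460).
		 */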
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* If there is no outer header we can fake a checksum offload
		 * due to the fact that we have already done the checksum in
		 * software prior to segmenting the frame.
		 */
		if (!skb->encap_hdr_csum)
			features |= NETIF_F_HW_CSUM;

		/* Check if there is enough headroom to insert the fragment
		 * header.
		 */
		tnl_hlen = skb_tnl_header_len(skb);
		if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
			if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
				goto out;
		}

		/* Find the unfragmentable header and shift it left by
		 * frag_hdr_sz bytes to make room for the fragment header.
		 */
		err = ip6_find_1stfragopt(skb, &prevhdr);
		if (err < 0)
			return ERR_PTR(err);
		unfrag_ip6hlen = err;
		nexthdr = *prevhdr;
		*prevhdr = NEXTHDR_FRAGMENT;
		unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
			     unfrag_ip6hlen + tnl_hlen;
		packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
		memmove(packet_start - frag_hdr_sz, packet_start, unfrag_len);

		SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
		skb->mac_header -= frag_hdr_sz;
		skb->network_header -= frag_hdr_sz;

		fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
		fptr->nexthdr = nexthdr;
		fptr->reserved = 0;
		if (!skb_shinfo(skb)->ip6_frag_id)
			ipv6_proxy_select_ident(dev_net(skb->dev), skb);
		fptr->identification = skb_shinfo(skb)->ip6_frag_id;

		/* Fragment the skb. The IPv6 header and the remaining fields
		 * of the fragment header are updated in ipv6_gso_segment().
		 */
		segs = skb_segment(skb, features);
	}

out:
	return segs;
}

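/* GRO receive for UDP over IPv6: validate the UDP checksum against the
 * IPv6 pseudo-header, then defer the aggregation decision to the shared
 * udp_gro_receive() path.
 */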
static struct sk_buff **udp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);

	if (unlikely(!uh))
		goto flush;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

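	/* Flush if the checksum fails to validate against the IPv6
	 * pseudo-header; a zero checksum skips validation here and is
	 * judged later, at socket lookup. A validated nonzero checksum
	 * is then eligible for conversion to CHECKSUM_COMPLETE.
	 */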
	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 ip6_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
		skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					     ip6_gro_compute_pseudo);

skip:
	NAPI_GRO_CB(skb)->is_ipv6 = 1;
	return udp_gro_receive(head, skb, uh, udp6_lib_lookup_skb);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

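/* GRO complete for UDP over IPv6: fix up the UDP header of the merged
 * packet and hand off to the shared udp_gro_complete() path.
 */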
static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

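	/* The merged packet leaves GRO looking like a GSO tunnel packet,
	 * so pick the gso_type according to whether the outer UDP header
	 * carries a checksum, and re-seed that checksum with the
	 * pseudo-header sum for the new, larger length.
	 */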
	if (uh->check) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr,
					  &ipv6h->daddr, 0);
	} else {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}

	return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb);
}

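/* Offload callbacks registered for IPPROTO_UDP in the IPv6 stack. */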
static const struct net_offload udpv6_offload = {
	.callbacks = {
		.gso_segment	= udp6_ufo_fragment,
		.gro_receive	= udp6_gro_receive,
		.gro_complete	= udp6_gro_complete,
	},
};

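/* Hook the UDP offloads into (and out of) the IPv6 offload table. */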
int udpv6_offload_init(void)
{
	return inet6_add_offload(&udpv6_offload, IPPROTO_UDP);
}

int udpv6_offload_exit(void)
{
	return inet6_del_offload(&udpv6_offload, IPPROTO_UDP);
}