/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

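	/* Save the ones' complement of the original total length; adding
	 * a segment's new length to it later yields the incremental
	 * checksum delta for the pseudo-header length field.
	 */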
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = tcp_skb_mss(skb);
	if (unlikely(skb->len <= mss))
		goto out;

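	/* If segmentation can be handled further down the stack (the skb
	 * is GSO-capable once NETIF_F_GSO_ROBUST is assumed), no software
	 * segmentation is needed here; only validate gso_type and
	 * recompute gso_segs, since SKB_GSO_DODGY marks the values as
	 * coming from an untrusted source.
	 */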
	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       SKB_GSO_GRE |
			       SKB_GSO_IPIP |
			       SKB_GSO_SIT |
			       SKB_GSO_MPLS |
			       SKB_GSO_UDP_TUNNEL |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

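	/* delta is the checksum adjustment for shrinking the original
	 * packet down to one segment: the new length (thlen + mss) minus
	 * the old length, in ones' complement arithmetic.  All segments
	 * except possibly the last have the same length, so the adjusted
	 * checksum can be computed once and reused.
	 */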
	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

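	/* Fix up each segment: FIN and PSH may only remain set on the
	 * last segment, CWR only on the first, and the sequence number
	 * advances by mss per segment.  When the original skb was charged
	 * to a socket via tcp_wfree, propagate destructor and owner so
	 * the truesize accounting can be transferred below.
	 */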
	do {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check =
			     csum_fold(csum_partial(skb_transport_header(skb),
						    thlen, skb->csum));

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	/* Following permits TCP Small Queues to work well with GSO :
	 * The callback to TCP stack will be called at the time last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by GSO engine
	 */
	if (copy_destructor) {
		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		atomic_add(sum_truesize - gso_skb->truesize,
			   &skb->sk->sk_wmem_alloc);
	}

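	/* The last segment may carry less than mss bytes of payload, so
	 * recompute the checksum delta from its actual length.
	 */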
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb_transport_header(skb),
						   thlen, skb->csum));
out:
	return segs;
}
EXPORT_SYMBOL(tcp_gso_segment);

struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

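	/* Look for a held packet of the same flow; the source and
	 * destination ports are adjacent in the header, so both compare
	 * in a single 32-bit load.
	 */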
	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
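	/* Merge only if no flag other than FIN/PSH differs, CWR is not
	 * set on the new segment (a congestion signal should not be
	 * delayed by aggregation), ack_seq and all TCP options match,
	 * the new segment is no larger than the flow's mss, and it
	 * follows the held data contiguously.
	 */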
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	mss = tcp_skb_mss(p);

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

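	/* A segment shorter than mss, or one carrying URG/PSH/RST/SYN/FIN,
	 * ends the aggregation and flushes the merged packet up the stack.
	 */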
out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
EXPORT_SYMBOL(tcp_gro_receive);

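/* Finish off a merged GRO packet: mark it CHECKSUM_PARTIAL with the
 * checksum field location filled in, derive gso_segs from the number of
 * merged segments, and restore the ECN gso_type flag from the CWR bit.
 */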
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

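/* Prime the checksum fields before segmentation: zero th->check, mark
 * the skb CHECKSUM_PARTIAL, and store the pseudo-header checksum so each
 * segment's checksum can be completed later.
 */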
static int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

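/* Verify the TCP checksum (unless the NIC already has, or the packet is
 * about to be flushed anyway) before handing the skb to the generic TCP
 * GRO engine.
 */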
static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;
	__sum16 sum;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip_csum;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
flush:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;

	case CHECKSUM_NONE:
		wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					  skb_gro_len(skb), IPPROTO_TCP, 0);
		sum = csum_fold(skb_checksum(skb,
					     skb_gro_offset(skb),
					     skb_gro_len(skb),
					     wsum));
		if (sum)
			goto flush;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}

skip_csum:
	return tcp_gro_receive(head, skb);
}

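/* Seed th->check with the ones' complement of the pseudo-header checksum
 * over the merged length, then let tcp_gro_complete() set up the
 * CHECKSUM_PARTIAL offload metadata.
 */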
static int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}

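/* Register the TCPv4 GSO/GRO handlers with the inet offload table, so
 * the inet layer can dispatch offload work for IPPROTO_TCP to them.
 */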
static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_send_check = tcp_v4_gso_send_check,
		.gso_segment = tcp_gso_segment,
		.gro_receive = tcp4_gro_receive,
		.gro_complete = tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}