/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INET_ECN_H_
#define _INET_ECN_H_

#include <linux/ip.h>
#include <linux/skbuff.h>

#include <net/inet_sock.h>
#include <net/dsfield.h>

enum {
	INET_ECN_NOT_ECT = 0,
	INET_ECN_ECT_1 = 1,
	INET_ECN_ECT_0 = 2,
	INET_ECN_CE = 3,
	INET_ECN_MASK = 3,
};
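
/*
 * These are the two ECN bits at the bottom of the IPv4 TOS byte /
 * IPv6 Traffic Class field (RFC 3168):
 * Not-ECT = 00, ECT(1) = 01, ECT(0) = 10, CE = 11.
 */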

extern int sysctl_tunnel_ecn_log;

static inline int INET_ECN_is_ce(__u8 dsfield)
{
	return (dsfield & INET_ECN_MASK) == INET_ECN_CE;
}

static inline int INET_ECN_is_not_ect(__u8 dsfield)
{
	return (dsfield & INET_ECN_MASK) == INET_ECN_NOT_ECT;
}

static inline int INET_ECN_is_capable(__u8 dsfield)
{
	return dsfield & INET_ECN_ECT_0;
}

/*
 * RFC 3168 9.1.1
 *  The full-functionality option for ECN encapsulation is to copy the
 *  ECN codepoint of the inside header to the outside header on
 *  encapsulation if the inside header is not-ECT or ECT, and to set the
 *  ECN codepoint of the outside header to ECT(0) if the ECN codepoint of
 *  the inside header is CE.
 */
static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
{
	outer &= ~INET_ECN_MASK;
	outer |= !INET_ECN_is_ce(inner) ? (inner & INET_ECN_MASK) :
					  INET_ECN_ECT_0;
	return outer;
}
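
/*
 * For example, a tunnel transmit path could build the outer TOS/TC from its
 * configured DSCP and the inner header (illustrative sketch, names assumed):
 *
 *	outer_tos = INET_ECN_encapsulate(tunnel_tos, inner_iph->tos);
 *
 * Inner Not-ECT, ECT(0) and ECT(1) are copied to the outer header, while an
 * inner CE is sent as ECT(0) on the outer header, per the RFC text above.
 */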

static inline void INET_ECN_xmit(struct sock *sk)
{
	inet_sk(sk)->tos |= INET_ECN_ECT_0;
	if (inet6_sk(sk) != NULL)
		inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
}

static inline void INET_ECN_dontxmit(struct sock *sk)
{
	inet_sk(sk)->tos &= ~INET_ECN_MASK;
	if (inet6_sk(sk) != NULL)
		inet6_sk(sk)->tclass &= ~INET_ECN_MASK;
}

#define IP6_ECN_flow_init(label) do {			\
	(label) &= ~htonl(INET_ECN_MASK << 20);		\
} while (0)
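
/*
 * The IPv6 flow information word is laid out as Version (4 bits), Traffic
 * Class (8 bits), Flow Label (20 bits), so the two ECN bits of the traffic
 * class sit at bits 21:20 of the big-endian word; hence the "<< 20" shifts
 * here and in the IPv6 helpers below.
 */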

#define IP6_ECN_flow_xmit(sk, label) do {				\
	if (INET_ECN_is_capable(inet6_sk(sk)->tclass))			\
		(label) |= htonl(INET_ECN_ECT_0 << 20);			\
} while (0)

static inline int IP_ECN_set_ce(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;
	u32 ecn = (iph->tos + 1) & INET_ECN_MASK;

	/*
	 * After the last operation we have (in binary):
	 * INET_ECN_NOT_ECT => 01
	 * INET_ECN_ECT_1   => 10
	 * INET_ECN_ECT_0   => 11
	 * INET_ECN_CE      => 00
	 */
	if (!(ecn & 2))
		return !ecn;

	/*
	 * The following gives us:
	 * INET_ECN_ECT_1 => check += htons(0xFFFD)
	 * INET_ECN_ECT_0 => check += htons(0xFFFE)
	 */
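	/*
	 * i.e. an incremental checksum update (cf. RFC 1624): setting CE
	 * raises the 16-bit {version/IHL, TOS} word of the header by 1
	 * (from ECT(0)) or by 2 (from ECT(1)), so the complemented checksum
	 * must drop by the same amount.  In one's complement arithmetic that
	 * means adding ~delta, i.e. htons(0xFFFE) or htons(0xFFFD), with the
	 * end-around carry folded back in below.
	 */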
	check += (__force u16)htons(0xFFFB) + (__force u16)htons(ecn);

	iph->check = (__force __sum16)(check + (check >= 0xFFFF));
	iph->tos |= INET_ECN_CE;
	return 1;
}

static inline void IP_ECN_clear(struct iphdr *iph)
{
	iph->tos &= ~INET_ECN_MASK;
}

static inline void ipv4_copy_dscp(unsigned int dscp, struct iphdr *inner)
{
	dscp &= ~INET_ECN_MASK;
	ipv4_change_dsfield(inner, INET_ECN_MASK, dscp);
}

struct ipv6hdr;

/* Note:
 * IP_ECN_set_ce() has to tweak the IPv4 header checksum when setting CE,
 * so the two changes cancel out and have no effect on skb->csum if/when
 * CHECKSUM_COMPLETE is in use.
 * In the IPv6 case there is no header checksum to compensate for the change,
 * so we have to update skb->csum here.
 */
static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
{
	__be32 from, to;

	if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
		return 0;

	from = *(__be32 *)iph;
	to = from | htonl(INET_ECN_CE << 20);
	*(__be32 *)iph = to;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
				     (__force __wsum)to);
	return 1;
}

static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
{
	dscp &= ~INET_ECN_MASK;
	ipv6_change_dsfield(inner, INET_ECN_MASK, dscp);
}

static inline int INET_ECN_set_ce(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
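		/* Only touch the header if it lies entirely within the
		 * linear skb data, i.e. before skb_tail_pointer().
		 */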
		if (skb_network_header(skb) + sizeof(struct iphdr) <=
		    skb_tail_pointer(skb))
			return IP_ECN_set_ce(ip_hdr(skb));
		break;

	case cpu_to_be16(ETH_P_IPV6):
		if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
		    skb_tail_pointer(skb))
			return IP6_ECN_set_ce(skb, ipv6_hdr(skb));
		break;
	}

	return 0;
}

/*
 * RFC 6040 4.2
 *  To decapsulate the inner header at the tunnel egress, a compliant
 *  tunnel egress MUST set the outgoing ECN field to the codepoint at the
 *  intersection of the appropriate arriving inner header (row) and outer
 *  header (column) in Figure 4
 *
 *      +---------+------------------------------------------------+
 *      |Arriving |            Arriving Outer Header               |
 *      |   Inner +---------+------------+------------+------------+
 *      |  Header | Not-ECT |   ECT(0)   |   ECT(1)   |     CE     |
 *      +---------+---------+------------+------------+------------+
 *      | Not-ECT | Not-ECT |Not-ECT(!!!)|Not-ECT(!!!)| <drop>(!!!)|
 *      |  ECT(0) |  ECT(0) |   ECT(0)   |   ECT(1)   |     CE     |
 *      |  ECT(1) |  ECT(1) | ECT(1) (!) |   ECT(1)   |     CE     |
 *      |    CE   |    CE   |     CE     |   CE(!!!)  |     CE     |
 *      +---------+---------+------------+------------+------------+
 *
 *             Figure 4: New IP in IP Decapsulation Behaviour
 *
 *  returns 0 on success
 *          1 if something is broken and should be logged (!!! above)
 *          2 if packet should be dropped
 */
static inline int INET_ECN_decapsulate(struct sk_buff *skb,
				       __u8 outer, __u8 inner)
{
	if (INET_ECN_is_not_ect(inner)) {
		switch (outer & INET_ECN_MASK) {
		case INET_ECN_NOT_ECT:
			return 0;
		case INET_ECN_ECT_0:
		case INET_ECN_ECT_1:
			return 1;
		case INET_ECN_CE:
			return 2;
		}
	}

	if (INET_ECN_is_ce(outer))
		INET_ECN_set_ce(skb);

	return 0;
}

static inline int IP_ECN_decapsulate(const struct iphdr *oiph,
				     struct sk_buff *skb)
{
	__u8 inner;

	if (skb->protocol == htons(ETH_P_IP))
		inner = ip_hdr(skb)->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner = ipv6_get_dsfield(ipv6_hdr(skb));
	else
		return 0;

	return INET_ECN_decapsulate(skb, oiph->tos, inner);
}
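
/*
 * A tunnel receive path would typically act on the return value roughly like
 * this (illustrative sketch; variable names and the message are assumed):
 *
 *	err = IP_ECN_decapsulate(outer_iph, skb);
 *	if (unlikely(err)) {
 *		if (log_ecn_error)
 *			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
 *					     &outer_iph->saddr, outer_iph->tos);
 *		if (err > 1)
 *			goto drop;
 *	}
 *
 * i.e. a return of 1 is only logged, while a return of 2 (outer CE on a
 * Not-ECT inner packet) causes the packet to be dropped.
 */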

static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h,
				      struct sk_buff *skb)
{
	__u8 inner;

	if (skb->protocol == htons(ETH_P_IP))
		inner = ip_hdr(skb)->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner = ipv6_get_dsfield(ipv6_hdr(skb));
	else
		return 0;

	return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner);
}
#endif