Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | #ifndef _INET_ECN_H_ |
| 2 | #define _INET_ECN_H_ |
| 3 | |
| 4 | #include <linux/ip.h> |
Thomas Graf | 2566a50 | 2005-11-05 21:14:04 +0100 | [diff] [blame] | 5 | #include <linux/skbuff.h> |
Arnaldo Carvalho de Melo | 14c8502 | 2005-12-27 02:43:12 -0200 | [diff] [blame] | 6 | |
| 7 | #include <net/inet_sock.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8 | #include <net/dsfield.h> |
| 9 | |
/* ECN codepoints carried in the two low-order bits of the IPv4 TOS /
 * IPv6 traffic-class byte (RFC 3168).
 */
enum {
	INET_ECN_NOT_ECT = 0,	/* 00: not ECN-capable transport */
	INET_ECN_ECT_1 = 1,	/* 01: ECN-capable transport, ECT(1) */
	INET_ECN_ECT_0 = 2,	/* 10: ECN-capable transport, ECT(0) */
	INET_ECN_CE = 3,	/* 11: congestion experienced */
	INET_ECN_MASK = 3,	/* mask covering the two ECN bits */
};
| 17 | |
/* Knob controlling logging of broken-ECN tunnel packets (the "should be
 * logged" case of INET_ECN_decapsulate()); defined elsewhere —
 * presumably exposed as a sysctl, per the name.
 */
extern int sysctl_tunnel_ecn_log;
| 19 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 20 | static inline int INET_ECN_is_ce(__u8 dsfield) |
| 21 | { |
| 22 | return (dsfield & INET_ECN_MASK) == INET_ECN_CE; |
| 23 | } |
| 24 | |
| 25 | static inline int INET_ECN_is_not_ect(__u8 dsfield) |
| 26 | { |
| 27 | return (dsfield & INET_ECN_MASK) == INET_ECN_NOT_ECT; |
| 28 | } |
| 29 | |
/* Return non-zero when the ECN field is ECT(0) or CE.
 *
 * This tests only the 0x02 bit, so ECT(1) (0x01) is reported as NOT
 * capable. NOTE(review): presumably intentional (ECT(1) is rarely
 * used as a sender codepoint) — confirm before relying on it.
 */
static inline int INET_ECN_is_capable(__u8 dsfield)
{
	return dsfield & INET_ECN_ECT_0;
}
| 34 | |
Eric Dumazet | b5d9c9c | 2011-10-22 01:25:23 -0400 | [diff] [blame] | 35 | /* |
| 36 | * RFC 3168 9.1.1 |
| 37 | * The full-functionality option for ECN encapsulation is to copy the |
| 38 | * ECN codepoint of the inside header to the outside header on |
| 39 | * encapsulation if the inside header is not-ECT or ECT, and to set the |
| 40 | * ECN codepoint of the outside header to ECT(0) if the ECN codepoint of |
| 41 | * the inside header is CE. |
| 42 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 43 | static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner) |
| 44 | { |
| 45 | outer &= ~INET_ECN_MASK; |
| 46 | outer |= !INET_ECN_is_ce(inner) ? (inner & INET_ECN_MASK) : |
| 47 | INET_ECN_ECT_0; |
| 48 | return outer; |
| 49 | } |
| 50 | |
/* Mark the socket's outgoing packets as ECN-capable: set ECT(0) in the
 * IPv4 TOS and, when the socket has IPv6 state, in the IPv6 traffic
 * class as well.
 */
static inline void INET_ECN_xmit(struct sock *sk)
{
	inet_sk(sk)->tos |= INET_ECN_ECT_0;
	if (inet6_sk(sk) != NULL)
		inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
}
| 57 | |
/* Clear the ECN bits so the socket's outgoing packets are sent
 * not-ECT (inverse of INET_ECN_xmit()).
 */
static inline void INET_ECN_dontxmit(struct sock *sk)
{
	inet_sk(sk)->tos &= ~INET_ECN_MASK;
	if (inet6_sk(sk) != NULL)
		inet6_sk(sk)->tclass &= ~INET_ECN_MASK;
}
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 64 | |
/* Clear the ECN bits (bits 20-21 of the network-byte-order flow-info
 * word) in an IPv6 flow label.
 */
#define IP6_ECN_flow_init(label) do {		\
	(label) &= ~htonl(INET_ECN_MASK << 20);	\
    } while (0)
| 68 | |
/* If the socket's IPv6 traffic class is ECN-capable, set ECT(0) in the
 * traffic-class bits (20-21) of the flow-info word @label.
 */
#define IP6_ECN_flow_xmit(sk, label) do {				\
	if (INET_ECN_is_capable(inet6_sk(sk)->tclass))			\
		(label) |= htonl(INET_ECN_ECT_0 << 20);			\
    } while (0)
| 73 | |
/* Set the CE codepoint in an IPv4 header, incrementally patching the
 * header checksum so the packet remains valid (no full recompute).
 *
 * Returns 1 if the packet is now (or already was) CE-marked,
 *         0 if the packet is not-ECT and must not be marked.
 */
static inline int IP_ECN_set_ce(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;
	u32 ecn = (iph->tos + 1) & INET_ECN_MASK;

	/*
	 * After the last operation we have (in binary):
	 * INET_ECN_NOT_ECT => 01
	 * INET_ECN_ECT_1   => 10
	 * INET_ECN_ECT_0   => 11
	 * INET_ECN_CE      => 00
	 */
	if (!(ecn & 2))
		return !ecn;	/* 00 => already CE (1); 01 => not-ECT (0) */

	/*
	 * The following gives us:
	 * INET_ECN_ECT_1 => check += htons(0xFFFD)
	 * INET_ECN_ECT_0 => check += htons(0xFFFE)
	 * i.e. the ones'-complement delta for flipping tos to CE.
	 */
	check += (__force u16)htons(0xFFFB) + (__force u16)htons(ecn);

	/* Fold the possible carry back in (ones'-complement add). */
	iph->check = (__force __sum16)(check + (check>=0xFFFF));
	iph->tos |= INET_ECN_CE;
	return 1;
}
| 100 | |
/* Remove the ECN bits from an IPv4 header's TOS field.
 * NOTE(review): unlike IP_ECN_set_ce(), the header checksum is not
 * adjusted here — presumably callers recompute it; verify at call sites.
 */
static inline void IP_ECN_clear(struct iphdr *iph)
{
	iph->tos &= ~INET_ECN_MASK;
}
| 105 | |
Herbert Xu | 29bb43b4 | 2007-11-13 21:40:13 -0800 | [diff] [blame] | 106 | static inline void ipv4_copy_dscp(unsigned int dscp, struct iphdr *inner) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 107 | { |
Herbert Xu | 29bb43b4 | 2007-11-13 21:40:13 -0800 | [diff] [blame] | 108 | dscp &= ~INET_ECN_MASK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 109 | ipv4_change_dsfield(inner, INET_ECN_MASK, dscp); |
| 110 | } |
| 111 | |
| 112 | struct ipv6hdr; |
| 113 | |
/* Note:
 * IP_ECN_set_ce() has to tweak IPV4 checksum when setting CE,
 * meaning both changes have no effect on skb->csum if/when CHECKSUM_COMPLETE
 * In IPv6 case, no checksum compensates the change in IPv6 header,
 * so we have to update skb->csum.
 *
 * Returns 1 if CE was set, 0 if the packet is not-ECT.
 */
static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
{
	__be32 from, to;

	/* A not-ECT packet must never be congestion-marked. */
	if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
		return 0;

	/* The ECN field sits in bits 20-21 of the first 32-bit word
	 * (version / traffic class / flow label), so OR-ing CE into
	 * that word is sufficient.
	 */
	from = *(__be32 *)iph;
	to = from | htonl(INET_ECN_CE << 20);
	*(__be32 *)iph = to;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
				     (__force __wsum)to);
	return 1;
}
| 135 | |
| 136 | static inline void IP6_ECN_clear(struct ipv6hdr *iph) |
| 137 | { |
Al Viro | 92d9ece | 2006-11-08 00:24:47 -0800 | [diff] [blame] | 138 | *(__be32*)iph &= ~htonl(INET_ECN_MASK << 20); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 139 | } |
| 140 | |
Herbert Xu | 29bb43b4 | 2007-11-13 21:40:13 -0800 | [diff] [blame] | 141 | static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 142 | { |
Herbert Xu | 29bb43b4 | 2007-11-13 21:40:13 -0800 | [diff] [blame] | 143 | dscp &= ~INET_ECN_MASK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 144 | ipv6_change_dsfield(inner, INET_ECN_MASK, dscp); |
| 145 | } |
| 146 | |
Thomas Graf | 2566a50 | 2005-11-05 21:14:04 +0100 | [diff] [blame] | 147 | static inline int INET_ECN_set_ce(struct sk_buff *skb) |
| 148 | { |
| 149 | switch (skb->protocol) { |
Harvey Harrison | f3a7c66 | 2009-02-14 22:58:35 -0800 | [diff] [blame] | 150 | case cpu_to_be16(ETH_P_IP): |
Simon Horman | ced14f6 | 2013-05-28 20:34:25 +0000 | [diff] [blame] | 151 | if (skb_network_header(skb) + sizeof(struct iphdr) <= |
| 152 | skb_tail_pointer(skb)) |
Arnaldo Carvalho de Melo | eddc9ec | 2007-04-20 22:47:35 -0700 | [diff] [blame] | 153 | return IP_ECN_set_ce(ip_hdr(skb)); |
Thomas Graf | 2566a50 | 2005-11-05 21:14:04 +0100 | [diff] [blame] | 154 | break; |
| 155 | |
Harvey Harrison | f3a7c66 | 2009-02-14 22:58:35 -0800 | [diff] [blame] | 156 | case cpu_to_be16(ETH_P_IPV6): |
Simon Horman | ced14f6 | 2013-05-28 20:34:25 +0000 | [diff] [blame] | 157 | if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= |
| 158 | skb_tail_pointer(skb)) |
Eric Dumazet | 34ae6a1 | 2016-01-15 04:56:56 -0800 | [diff] [blame] | 159 | return IP6_ECN_set_ce(skb, ipv6_hdr(skb)); |
Thomas Graf | 2566a50 | 2005-11-05 21:14:04 +0100 | [diff] [blame] | 160 | break; |
| 161 | } |
| 162 | |
| 163 | return 0; |
| 164 | } |
| 165 | |
/*
 * RFC 6040 4.2
 *  To decapsulate the inner header at the tunnel egress, a compliant
 *  tunnel egress MUST set the outgoing ECN field to the codepoint at the
 *  intersection of the appropriate arriving inner header (row) and outer
 *  header (column) in Figure 4
 *
 *      +---------+------------------------------------------------+
 *      |Arriving |            Arriving Outer Header               |
 *      |   Inner +---------+------------+------------+------------+
 *      |  Header | Not-ECT | ECT(0)     | ECT(1)     |     CE     |
 *      +---------+---------+------------+------------+------------+
 *      | Not-ECT | Not-ECT |Not-ECT(!!!)|Not-ECT(!!!)| <drop>(!!!)|
 *      |  ECT(0) |  ECT(0) | ECT(0)     | ECT(1)     |     CE     |
 *      |  ECT(1) |  ECT(1) | ECT(1) (!) | ECT(1)     |     CE     |
 *      |    CE   |      CE |     CE     |     CE(!!!)|     CE     |
 *      +---------+---------+------------+------------+------------+
 *
 *             Figure 4: New IP in IP Decapsulation Behaviour
 *
 *  returns 0 on success
 *          1 if something is broken and should be logged (!!! above)
 *          2 if packet should be dropped
 *
 * NOTE(review): the code below does not implement every cell of the
 * table: outer ECT(1) with inner ECT(0) is left as ECT(0) instead of
 * being re-marked ECT(1), and outer ECT(1) with inner CE returns 0
 * rather than the documented "should be logged" 1 — confirm against
 * current RFC 6040 guidance before changing.
 */
static inline int INET_ECN_decapsulate(struct sk_buff *skb,
				       __u8 outer, __u8 inner)
{
	/* A not-ECT inner packet must never be ECN-marked: any ECT or CE
	 * in the outer header indicates a broken (or congested) tunnel.
	 */
	if (INET_ECN_is_not_ect(inner)) {
		switch (outer & INET_ECN_MASK) {
		case INET_ECN_NOT_ECT:
			return 0;
		case INET_ECN_ECT_0:
		case INET_ECN_ECT_1:
			return 1;	/* broken: caller should log */
		case INET_ECN_CE:
			return 2;	/* congestion signal lost: drop */
		}
	}

	/* Propagate an outer congestion mark into the inner header. */
	if (INET_ECN_is_ce(outer))
		INET_ECN_set_ce(skb);

	return 0;
}
| 210 | |
| 211 | static inline int IP_ECN_decapsulate(const struct iphdr *oiph, |
| 212 | struct sk_buff *skb) |
| 213 | { |
| 214 | __u8 inner; |
| 215 | |
| 216 | if (skb->protocol == htons(ETH_P_IP)) |
| 217 | inner = ip_hdr(skb)->tos; |
| 218 | else if (skb->protocol == htons(ETH_P_IPV6)) |
| 219 | inner = ipv6_get_dsfield(ipv6_hdr(skb)); |
| 220 | else |
| 221 | return 0; |
| 222 | |
| 223 | return INET_ECN_decapsulate(skb, oiph->tos, inner); |
| 224 | } |
| 225 | |
| 226 | static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h, |
| 227 | struct sk_buff *skb) |
| 228 | { |
| 229 | __u8 inner; |
| 230 | |
| 231 | if (skb->protocol == htons(ETH_P_IP)) |
| 232 | inner = ip_hdr(skb)->tos; |
| 233 | else if (skb->protocol == htons(ETH_P_IPV6)) |
| 234 | inner = ipv6_get_dsfield(ipv6_hdr(skb)); |
| 235 | else |
| 236 | return 0; |
| 237 | |
| 238 | return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner); |
| 239 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 240 | #endif |