#ifndef _INET_ECN_H_
#define _INET_ECN_H_

#include <linux/ip.h>
#include <linux/skbuff.h>

#include <net/inet_sock.h>
#include <net/dsfield.h>

enum {
	INET_ECN_NOT_ECT = 0,
	INET_ECN_ECT_1 = 1,
	INET_ECN_ECT_0 = 2,
	INET_ECN_CE = 3,
	INET_ECN_MASK = 3,
};

extern int sysctl_tunnel_ecn_log;

static inline int INET_ECN_is_ce(__u8 dsfield)
{
	return (dsfield & INET_ECN_MASK) == INET_ECN_CE;
}

static inline int INET_ECN_is_not_ect(__u8 dsfield)
{
	return (dsfield & INET_ECN_MASK) == INET_ECN_NOT_ECT;
}

static inline int INET_ECN_is_capable(__u8 dsfield)
{
	return dsfield & INET_ECN_ECT_0;
}
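
/* Illustrative sketch (not part of the original header): how the two ECN
 * bits of a TOS/traffic-class byte map onto the helpers above. The byte
 * values are hypothetical examples.
 *
 *	INET_ECN_is_not_ect(0x00) -> 1		(Not-ECT)
 *	INET_ECN_is_capable(0x01) -> 0		(ECT(1): the 0x02 bit is clear)
 *	INET_ECN_is_capable(0x02) -> 2		(ECT(0): nonzero, i.e. true)
 *	INET_ECN_is_ce(0xb3)      -> 1		(DSCP 0x2c with CE in the low bits)
 *
 * Note that INET_ECN_is_capable() tests only the INET_ECN_ECT_0 bit, so it
 * is true for ECT(0) and CE but not for ECT(1).
 */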

/*
 * RFC 3168 9.1.1
 *  The full-functionality option for ECN encapsulation is to copy the
 *  ECN codepoint of the inside header to the outside header on
 *  encapsulation if the inside header is not-ECT or ECT, and to set the
 *  ECN codepoint of the outside header to ECT(0) if the ECN codepoint of
 *  the inside header is CE.
 */
static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
{
	outer &= ~INET_ECN_MASK;
	outer |= !INET_ECN_is_ce(inner) ? (inner & INET_ECN_MASK) :
					  INET_ECN_ECT_0;
	return outer;
}
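
/* Illustrative sketch (not part of the original header): building the outer
 * TOS byte of a tunnel header per the RFC 3168 rule above. "outer_dscp" and
 * "inner_tos" are hypothetical caller-side variables.
 *
 *	__u8 inner_tos  = INET_ECN_CE;		// inner header carries CE
 *	__u8 outer_dscp = 0xb8;			// e.g. DSCP EF, Not-ECT
 *	__u8 outer_tos  = INET_ECN_encapsulate(outer_dscp, inner_tos);
 *	// outer_tos == 0xba: DSCP preserved, ECN field set to ECT(0),
 *	// because a CE inner codepoint must not be copied to the outer header.
 */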

static inline void INET_ECN_xmit(struct sock *sk)
{
	inet_sk(sk)->tos |= INET_ECN_ECT_0;
	if (inet6_sk(sk) != NULL)
		inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
}

static inline void INET_ECN_dontxmit(struct sock *sk)
{
	inet_sk(sk)->tos &= ~INET_ECN_MASK;
	if (inet6_sk(sk) != NULL)
		inet6_sk(sk)->tclass &= ~INET_ECN_MASK;
}

#define IP6_ECN_flow_init(label) do {			\
		(label) &= ~htonl(INET_ECN_MASK << 20);	\
	} while (0)

#define IP6_ECN_flow_xmit(sk, label) do {			\
		if (INET_ECN_is_capable(inet6_sk(sk)->tclass))	\
			(label) |= htonl(INET_ECN_ECT_0 << 20);	\
	} while (0)

static inline int IP_ECN_set_ce(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;
	u32 ecn = (iph->tos + 1) & INET_ECN_MASK;

	/*
	 * After the last operation we have (in binary):
	 * INET_ECN_NOT_ECT => 01
	 * INET_ECN_ECT_1   => 10
	 * INET_ECN_ECT_0   => 11
	 * INET_ECN_CE      => 00
	 */
	if (!(ecn & 2))
		return !ecn;

	/*
	 * The following gives us:
	 * INET_ECN_ECT_1 => check += htons(0xFFFD)
	 * INET_ECN_ECT_0 => check += htons(0xFFFE)
	 */
	check += (__force u16)htons(0xFFFB) + (__force u16)htons(ecn);

	iph->check = (__force __sum16)(check + (check >= 0xFFFF));
	iph->tos |= INET_ECN_CE;
	return 1;
}
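
/* Worked example (illustrative, not part of the original header) of the
 * checksum fix-up above. Setting CE raises iph->tos by 1 (from ECT(0)) or
 * by 2 (from ECT(1)); tos is the low-order byte of the first 16-bit header
 * word, so that word grows by the same amount and iph->check, the one's-
 * complement checksum, must shrink by it. Modulo 0xFFFF a decrement of 1
 * or 2 is an increment of 0xFFFE or 0xFFFD (htons()'d, since iph->check is
 * stored in network byte order). With ecn == (tos + 1) & 3 being 3 for
 * ECT(0) and 2 for ECT(1), htons(0xFFFB) + htons(ecn) yields exactly those
 * constants, and the trailing "check + (check >= 0xFFFF)" folds the carry
 * of the 32-bit accumulation back into 16 bits (end-around carry).
 */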

static inline void IP_ECN_clear(struct iphdr *iph)
{
	iph->tos &= ~INET_ECN_MASK;
}

static inline void ipv4_copy_dscp(unsigned int dscp, struct iphdr *inner)
{
	dscp &= ~INET_ECN_MASK;
	ipv4_change_dsfield(inner, INET_ECN_MASK, dscp);
}

struct ipv6hdr;

/* Note:
 * IP_ECN_set_ce() has to tweak the IPv4 header checksum when setting CE,
 * so the two changes cancel out and skb->csum is unaffected if/when
 * CHECKSUM_COMPLETE is in use. In the IPv6 case no checksum compensates
 * for the change in the IPv6 header, so we have to update skb->csum here.
 */
static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
{
	__be32 from, to;

	if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
		return 0;

	from = *(__be32 *)iph;
	to = from | htonl(INET_ECN_CE << 20);
	*(__be32 *)iph = to;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(csum_sub(skb->csum, from), to);
	return 1;
}
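
/* Illustrative sketch (not part of the original header): the first 32-bit
 * word of an IPv6 header is version:4 | traffic class:8 | flow label:20,
 * so the ECN bits occupy bits 21:20 of the host-order word and CE is set
 * by OR-ing in htonl(INET_ECN_CE << 20). For a hypothetical header word
 *
 *	from = htonl(0x60200000);		// version 6, ECT(0), flow label 0
 *	to   = from | htonl(INET_ECN_CE << 20);	// htonl(0x60300000): now CE
 *
 * Because no IPv6 header checksum absorbs this change, a CHECKSUM_COMPLETE
 * skb->csum (which covers the received bytes, including this header word)
 * is corrected by removing "from" and adding "to", which is what
 * csum_add(csum_sub(skb->csum, from), to) does above.
 */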

static inline void IP6_ECN_clear(struct ipv6hdr *iph)
{
	*(__be32 *)iph &= ~htonl(INET_ECN_MASK << 20);
}

static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
{
	dscp &= ~INET_ECN_MASK;
	ipv6_change_dsfield(inner, INET_ECN_MASK, dscp);
}

static inline int INET_ECN_set_ce(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (skb_network_header(skb) + sizeof(struct iphdr) <=
		    skb_tail_pointer(skb))
			return IP_ECN_set_ce(ip_hdr(skb));
		break;

	case cpu_to_be16(ETH_P_IPV6):
		if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
		    skb_tail_pointer(skb))
			return IP6_ECN_set_ce(skb, ipv6_hdr(skb));
		break;
	}

	return 0;
}

/*
 * RFC 6040 4.2
 *  To decapsulate the inner header at the tunnel egress, a compliant
 *  tunnel egress MUST set the outgoing ECN field to the codepoint at the
 *  intersection of the appropriate arriving inner header (row) and outer
 *  header (column) in Figure 4
 *
 *      +---------+------------------------------------------------+
 *      |Arriving |            Arriving Outer Header               |
 *      |   Inner +---------+------------+------------+------------+
 *      |  Header | Not-ECT |   ECT(0)   |   ECT(1)   |     CE     |
 *      +---------+---------+------------+------------+------------+
 *      | Not-ECT | Not-ECT |Not-ECT(!!!)|Not-ECT(!!!)| <drop>(!!!)|
 *      |  ECT(0) |  ECT(0) |   ECT(0)   |   ECT(1)   |     CE     |
 *      |  ECT(1) |  ECT(1) | ECT(1) (!) |   ECT(1)   |     CE     |
 *      |    CE   |      CE |     CE     |     CE(!!!)|     CE     |
 *      +---------+---------+------------+------------+------------+
 *
 *            Figure 4: New IP in IP Decapsulation Behaviour
 *
 *  returns 0 on success
 *          1 if something is broken and should be logged (!!! above)
 *          2 if packet should be dropped
 */
static inline int INET_ECN_decapsulate(struct sk_buff *skb,
				       __u8 outer, __u8 inner)
{
	if (INET_ECN_is_not_ect(inner)) {
		switch (outer & INET_ECN_MASK) {
		case INET_ECN_NOT_ECT:
			return 0;
		case INET_ECN_ECT_0:
		case INET_ECN_ECT_1:
			return 1;
		case INET_ECN_CE:
			return 2;
		}
	}

	if (INET_ECN_is_ce(outer))
		INET_ECN_set_ce(skb);

	return 0;
}
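
/* Illustrative sketch (not part of the original header): how a tunnel
 * egress might apply the RFC 6040 table above after parsing an outer IPv4
 * header. "oiph" points at the outer header still in the skb; the error
 * counter and the "log_ecn_error" flag (cf. sysctl_tunnel_ecn_log above)
 * are hypothetical per-tunnel state, not defined here.
 *
 *	int err = IP_ECN_decapsulate(oiph, skb);
 *
 *	if (err > 1) {
 *		rx_frame_errors++;		// hypothetical counter
 *		kfree_skb(skb);			// table says <drop>
 *		return 0;
 *	}
 *	if (err == 1 && log_ecn_error)
 *		net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
 *				     &oiph->saddr, oiph->tos);
 *
 * On err == 0 the packet continues up the stack; if the outer header
 * carried CE, INET_ECN_set_ce() has already propagated it into the inner
 * header.
 */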

static inline int IP_ECN_decapsulate(const struct iphdr *oiph,
				     struct sk_buff *skb)
{
	__u8 inner;

	if (skb->protocol == htons(ETH_P_IP))
		inner = ip_hdr(skb)->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner = ipv6_get_dsfield(ipv6_hdr(skb));
	else
		return 0;

	return INET_ECN_decapsulate(skb, oiph->tos, inner);
}

static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h,
				      struct sk_buff *skb)
{
	__u8 inner;

	if (skb->protocol == htons(ETH_P_IP))
		inner = ip_hdr(skb)->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner = ipv6_get_dsfield(ipv6_hdr(skb));
	else
		return 0;

	return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner);
}
#endif