Vlad Yasevich | 3c73a03 | 2012-11-15 08:49:20 +0000 | [diff] [blame] | 1 | /* |
| 2 | * IPv6 library code, needed by static components when full IPv6 support is |
| 3 | * not configured or static. These functions are needed by GSO/GRO implementation. |
| 4 | */ |
| 5 | #include <linux/export.h> |
Ben Hutchings | 5188cd4 | 2014-10-30 18:27:17 +0000 | [diff] [blame] | 6 | #include <net/ip.h> |
Vlad Yasevich | 3c73a03 | 2012-11-15 08:49:20 +0000 | [diff] [blame] | 7 | #include <net/ipv6.h> |
| 8 | #include <net/ip6_fib.h> |
Cong Wang | 3ce9b35 | 2013-08-31 13:44:28 +0800 | [diff] [blame] | 9 | #include <net/addrconf.h> |
Hannes Frederic Sowa | 6dfac5c | 2014-03-30 18:28:03 +0200 | [diff] [blame] | 10 | #include <net/secure_seq.h> |
Pablo Neira Ayuso | a263653 | 2015-06-17 10:28:27 -0500 | [diff] [blame] | 11 | #include <linux/netfilter.h> |
Vlad Yasevich | 3c73a03 | 2012-11-15 08:49:20 +0000 | [diff] [blame] | 12 | |
/* Compute an IPv6 fragment identification for the (dst, src) address pair.
 *
 * The pair is hashed with the per-netns siphash key (shared with the IPv4
 * ID generator, net->ipv4.ip_id_key) and the hash is turned into an
 * incrementing ID via ip_idents_reserve().  Returns the ID in host byte
 * order; never returns 0 (0 means "unset" to callers).
 */
static u32 __ipv6_select_ident(struct net *net,
			       const struct in6_addr *dst,
			       const struct in6_addr *src)
{
	/* Both addresses copied into one buffer, aligned as siphash()
	 * requires, so the key covers dst and src in a single pass.
	 */
	const struct {
		struct in6_addr dst;
		struct in6_addr src;
	} __aligned(SIPHASH_ALIGNMENT) combined = {
		.dst = *dst,
		.src = *src,
	};
	u32 hash, id;

	/* Note the following code is not safe, but this is okay.
	 *
	 * Lazy, racy init of the per-netns key: concurrent CPUs may each
	 * observe a zero key and overwrite it, but every writer stores
	 * random bytes, so whichever store wins still leaves a usable
	 * random key.
	 */
	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
		get_random_bytes(&net->ipv4.ip_id_key,
				 sizeof(net->ipv4.ip_id_key));

	hash = siphash(&combined, sizeof(combined), &net->ipv4.ip_id_key);

	/* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
	 * set the high order bit instead, thus minimizing possible future
	 * collisions.
	 */
	id = ip_idents_reserve(hash, 1);
	if (unlikely(!id))
		id = 1 << 31;

	return id;
}
| 43 | |
/* This function exists only for tap drivers that must support broken
 * clients requesting UFO without specifying an IPv6 fragment ID.
 *
 * This is similar to ipv6_select_ident(); NOTE(review): the historical
 * claim of an "independent hash seed" no longer matches this code — both
 * paths now derive the ID from the same net->ipv4.ip_id_key via
 * __ipv6_select_ident().
 *
 * The network header must be set before calling this.
 */
void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
{
	struct in6_addr buf[2];
	struct in6_addr *addrs;
	u32 id;

	/* Fetch saddr and daddr (adjacent in struct ipv6hdr) — copied
	 * into buf if the skb data is non-linear.  A truncated header
	 * makes this fail; we then silently leave the frag ID unset.
	 */
	addrs = skb_header_pointer(skb,
				   skb_network_offset(skb) +
				   offsetof(struct ipv6hdr, saddr),
				   sizeof(buf), buf);
	if (!addrs)
		return;

	/* addrs[0] is saddr, addrs[1] is daddr; helper takes (dst, src). */
	id = __ipv6_select_ident(net, &addrs[1], &addrs[0]);
	skb_shinfo(skb)->ip6_frag_id = htonl(id);
}
EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
| 69 | |
Eric Dumazet | 7f15986 | 2015-05-25 16:02:21 -0700 | [diff] [blame] | 70 | __be32 ipv6_select_ident(struct net *net, |
| 71 | const struct in6_addr *daddr, |
| 72 | const struct in6_addr *saddr) |
Vlad Yasevich | 0508c07 | 2015-02-03 16:36:15 -0500 | [diff] [blame] | 73 | { |
Vlad Yasevich | 0508c07 | 2015-02-03 16:36:15 -0500 | [diff] [blame] | 74 | u32 id; |
| 75 | |
Eric Dumazet | b97a2f3 | 2019-08-17 00:01:27 +0100 | [diff] [blame] | 76 | id = __ipv6_select_ident(net, daddr, saddr); |
Martin KaFai Lau | 286c234 | 2015-05-22 20:55:56 -0700 | [diff] [blame] | 77 | return htonl(id); |
Vlad Yasevich | 0508c07 | 2015-02-03 16:36:15 -0500 | [diff] [blame] | 78 | } |
| 79 | EXPORT_SYMBOL(ipv6_select_ident); |
| 80 | |
/* Find the offset (from the network header) at which a fragment header
 * must be inserted, and point *nexthdr at the nexthdr byte that will
 * precede it.
 *
 * Per-RFC ordering, the fragment header goes after any Hop-by-Hop and
 * Routing headers (and after a Destination Options header that carries a
 * Home Address option, when MIP6 is enabled).  Returns the byte offset on
 * success, or -EINVAL if the extension header chain is truncated or would
 * exceed IPV6_MAXPLEN.
 */
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	unsigned int offset = sizeof(struct ipv6hdr);
	/* Linear header bytes available from the network header onward. */
	unsigned int packet_len = skb_tail_pointer(skb) -
				  skb_network_header(skb);
	int found_rhdr = 0;
	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset <= packet_len) {
		struct ipv6_opt_hdr *exthdr;

		switch (**nexthdr) {

		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#if IS_ENABLED(CONFIG_IPV6_MIP6)
			/* A Dest Options header with a Home Address option
			 * must stay before the fragment header — keep
			 * walking past it.
			 */
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			/* Dest Options after a Routing header: fragment
			 * header goes here.
			 */
			if (found_rhdr)
				return offset;
			break;
		default:
			/* First non-skippable header: insert here. */
			return offset;
		}

		/* Make sure the fixed part of the next extension header is
		 * actually within the packet before dereferencing it.
		 */
		if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
			return -EINVAL;

		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
		offset += ipv6_optlen(exthdr);
		/* Reject chains whose claimed lengths run past the maximum
		 * payload — guards against bogus hdrlen values.
		 */
		if (offset > IPV6_MAXPLEN)
			return -EINVAL;
		*nexthdr = &exthdr->nexthdr;
	}

	/* Ran off the end of the linear data without finding the spot. */
	return -EINVAL;
}
EXPORT_SYMBOL(ip6_find_1stfragopt);
Cong Wang | 3ce9b35 | 2013-08-31 13:44:28 +0800 | [diff] [blame] | 125 | |
| 126 | #if IS_ENABLED(CONFIG_IPV6) |
| 127 | int ip6_dst_hoplimit(struct dst_entry *dst) |
| 128 | { |
| 129 | int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT); |
| 130 | if (hoplimit == 0) { |
| 131 | struct net_device *dev = dst->dev; |
| 132 | struct inet6_dev *idev; |
| 133 | |
| 134 | rcu_read_lock(); |
| 135 | idev = __in6_dev_get(dev); |
| 136 | if (idev) |
| 137 | hoplimit = idev->cnf.hop_limit; |
| 138 | else |
| 139 | hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit; |
| 140 | rcu_read_unlock(); |
| 141 | } |
| 142 | return hoplimit; |
| 143 | } |
| 144 | EXPORT_SYMBOL(ip6_dst_hoplimit); |
| 145 | #endif |
Cong Wang | 788787b | 2013-08-31 13:44:29 +0800 | [diff] [blame] | 146 | |
/* Finalize a locally generated IPv6 packet and run it through the
 * NF_INET_LOCAL_OUT netfilter hook.
 *
 * Fills in payload_len from skb->len (forced to 0 when it would exceed
 * IPV6_MAXPLEN — presumably the jumbogram case, TODO confirm), records
 * the nexthdr offset in the skb control block, and hands the skb to the
 * L3 master device if one has claimed it.
 *
 * Returns whatever nf_hook() returns; 1 means the caller must continue
 * transmission via dst_output() (see ip6_local_out()).
 */
int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int len;

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;
	ipv6_hdr(skb)->payload_len = htons(len);
	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip6_out(sk, skb);
	if (unlikely(!skb))
		return 0; /* consumed by the l3mdev handler */

	skb->protocol = htons(ETH_P_IPV6);

	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}
EXPORT_SYMBOL_GPL(__ip6_local_out);
| 171 | |
/* Send a locally generated IPv6 packet: run the LOCAL_OUT hook via
 * __ip6_local_out() and, when the hook lets the packet through
 * (return value 1), push it to the route's output path.
 */
int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err = __ip6_local_out(net, sk, skb);

	if (unlikely(err != 1))
		return err;

	return dst_output(net, sk, skb);
}
EXPORT_SYMBOL_GPL(ip6_local_out);