/*
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _NET_IPV6_H
#define _NET_IPV6_H

#include <linux/ipv6.h>
#include <linux/hardirq.h>
#include <linux/jhash.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/snmp.h>

#define SIN6_LEN_RFC2133	24

#define IPV6_MAXPLEN		65535

/*
 *	NextHeader field of IPv6 header
 */

#define NEXTHDR_HOP		0	/* Hop-by-hop option header. */
#define NEXTHDR_TCP		6	/* TCP segment. */
#define NEXTHDR_UDP		17	/* UDP message. */
#define NEXTHDR_IPV6		41	/* IPv6 in IPv6 */
#define NEXTHDR_ROUTING		43	/* Routing header. */
#define NEXTHDR_FRAGMENT	44	/* Fragmentation/reassembly header. */
#define NEXTHDR_GRE		47	/* GRE header. */
#define NEXTHDR_ESP		50	/* Encapsulating security payload. */
#define NEXTHDR_AUTH		51	/* Authentication header. */
#define NEXTHDR_ICMP		58	/* ICMP for IPv6. */
#define NEXTHDR_NONE		59	/* No next header */
#define NEXTHDR_DEST		60	/* Destination options header. */
#define NEXTHDR_SCTP		132	/* SCTP message. */
#define NEXTHDR_MOBILITY	135	/* Mobility header. */

#define NEXTHDR_MAX		255

#define IPV6_DEFAULT_HOPLIMIT	64
#define IPV6_DEFAULT_MCASTHOPS	1

/*
 *	Addr type
 *
 *	type	-	unicast | multicast
 *	scope	-	local	| site	| global
 *	v4	-	compat
 *	v4mapped
 *	any
 *	loopback
 */

#define IPV6_ADDR_ANY		0x0000U

#define IPV6_ADDR_UNICAST	0x0001U
#define IPV6_ADDR_MULTICAST	0x0002U

#define IPV6_ADDR_LOOPBACK	0x0010U
#define IPV6_ADDR_LINKLOCAL	0x0020U
#define IPV6_ADDR_SITELOCAL	0x0040U

#define IPV6_ADDR_COMPATv4	0x0080U

#define IPV6_ADDR_SCOPE_MASK	0x00f0U

#define IPV6_ADDR_MAPPED	0x1000U

/*
 *	Addr scopes
 */
#define IPV6_ADDR_MC_SCOPE(a)	\
	((a)->s6_addr[1] & 0x0f)	/* nonstandard */
#define __IPV6_ADDR_SCOPE_INVALID	-1
#define IPV6_ADDR_SCOPE_NODELOCAL	0x01
#define IPV6_ADDR_SCOPE_LINKLOCAL	0x02
#define IPV6_ADDR_SCOPE_SITELOCAL	0x05
#define IPV6_ADDR_SCOPE_ORGLOCAL	0x08
#define IPV6_ADDR_SCOPE_GLOBAL		0x0e

/*
 *	Addr flags
 */
#define IPV6_ADDR_MC_FLAG_TRANSIENT(a)	\
	((a)->s6_addr[1] & 0x10)
#define IPV6_ADDR_MC_FLAG_PREFIX(a)	\
	((a)->s6_addr[1] & 0x20)
#define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a)	\
	((a)->s6_addr[1] & 0x40)

/*
 *	fragmentation header
 */

struct frag_hdr {
	__u8	nexthdr;
	__u8	reserved;
	__be16	frag_off;
	__be32	identification;
};

#define	IP6_MF		0x0001
#define	IP6_OFFSET	0xFFF8

#define IP6_REPLY_MARK(net, mark) \
	((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)

#include <net/sock.h>

/* sysctls */
extern int sysctl_mld_max_msf;
extern int sysctl_mld_qrv;

#define _DEVINC(net, statname, mod, idev, field)			\
({									\
	struct inet6_dev *_idev = (idev);				\
	if (likely(_idev != NULL))					\
		mod##SNMP_INC_STATS64((_idev)->stats.statname, (field));\
	mod##SNMP_INC_STATS64((net)->mib.statname##_statistics, (field));\
})

/* per device counters are atomic_long_t */
#define _DEVINCATOMIC(net, statname, mod, idev, field)			\
({									\
	struct inet6_dev *_idev = (idev);				\
	if (likely(_idev != NULL))					\
		SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
	mod##SNMP_INC_STATS((net)->mib.statname##_statistics, (field));\
})

/* per device and per net counters are atomic_long_t */
#define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field)		\
({									\
	struct inet6_dev *_idev = (idev);				\
	if (likely(_idev != NULL))					\
		SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
	SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
})

#define _DEVADD(net, statname, mod, idev, field, val)			\
({									\
	struct inet6_dev *_idev = (idev);				\
	if (likely(_idev != NULL))					\
		mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \
	mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val));\
})

#define _DEVUPD(net, statname, mod, idev, field, val)			\
({									\
	struct inet6_dev *_idev = (idev);				\
	if (likely(_idev != NULL))					\
		mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \
	mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val));\
})

/* MIBs */

#define IP6_INC_STATS(net, idev,field)		\
		_DEVINC(net, ipv6, , idev, field)
#define __IP6_INC_STATS(net, idev,field)	\
		_DEVINC(net, ipv6, __, idev, field)
#define IP6_ADD_STATS(net, idev,field,val)	\
		_DEVADD(net, ipv6, , idev, field, val)
#define __IP6_ADD_STATS(net, idev,field,val)	\
		_DEVADD(net, ipv6, __, idev, field, val)
#define IP6_UPD_PO_STATS(net, idev,field,val)	\
		_DEVUPD(net, ipv6, , idev, field, val)
#define __IP6_UPD_PO_STATS(net, idev,field,val)	\
		_DEVUPD(net, ipv6, __, idev, field, val)
#define ICMP6_INC_STATS(net, idev, field)	\
		_DEVINCATOMIC(net, icmpv6, , idev, field)
#define __ICMP6_INC_STATS(net, idev, field)	\
		_DEVINCATOMIC(net, icmpv6, __, idev, field)

#define ICMP6MSGOUT_INC_STATS(net, idev, field)		\
	_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
#define ICMP6MSGIN_INC_STATS(net, idev, field)	\
	_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)

struct ip6_ra_chain {
	struct ip6_ra_chain	*next;
	struct sock		*sk;
	int			sel;
	void			(*destructor)(struct sock *);
};

extern struct ip6_ra_chain	*ip6_ra_chain;
extern rwlock_t ip6_ra_lock;

/*
 * This structure is prepared by the protocol when parsing
 * ancillary data and is then passed on to IPv6.
 */

struct ipv6_txoptions {
	atomic_t		refcnt;
	/* Length of this structure */
	int			tot_len;

	/* length of extension headers */

	__u16			opt_flen;	/* after fragment hdr */
	__u16			opt_nflen;	/* before fragment hdr */

	struct ipv6_opt_hdr	*hopopt;
	struct ipv6_opt_hdr	*dst0opt;
	struct ipv6_rt_hdr	*srcrt;	/* Routing Header */
	struct ipv6_opt_hdr	*dst1opt;
	struct rcu_head		rcu;
	/* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
};

struct ip6_flowlabel {
	struct ip6_flowlabel __rcu *next;
	__be32			label;
	atomic_t		users;
	struct in6_addr		dst;
	struct ipv6_txoptions	*opt;
	unsigned long		linger;
	struct rcu_head		rcu;
	u8			share;
	union {
		struct pid *pid;
		kuid_t uid;
	} owner;
	unsigned long		lastuse;
	unsigned long		expires;
	struct net		*fl_net;
};

#define IPV6_FLOWINFO_MASK		cpu_to_be32(0x0FFFFFFF)
#define IPV6_FLOWLABEL_MASK		cpu_to_be32(0x000FFFFF)
#define IPV6_FLOWLABEL_STATELESS_FLAG	cpu_to_be32(0x00080000)

#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT	20

struct ipv6_fl_socklist {
	struct ipv6_fl_socklist	__rcu	*next;
	struct ip6_flowlabel		*fl;
	struct rcu_head			rcu;
};

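/* Per-packet control data (hop limit, traffic class, fragmentation policy
 * and tx options) handed down to ip6_append_data()/ip6_make_skb(), typically
 * built from sendmsg() ancillary data.
 */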
struct ipcm6_cookie {
	__s16 hlimit;
	__s16 tclass;
	__s8  dontfrag;
	struct ipv6_txoptions *opt;
};

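/* Take a reference on the socket's tx options under RCU.  Returns NULL if
 * no options are set or the last reference is already gone; otherwise the
 * caller must drop the reference with txopt_put().
 */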
static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
{
	struct ipv6_txoptions *opt;

	rcu_read_lock();
	opt = rcu_dereference(np->opt);
	if (opt) {
		if (!atomic_inc_not_zero(&opt->refcnt))
			opt = NULL;
		else
			opt = rcu_pointer_handoff(opt);
	}
	rcu_read_unlock();
	return opt;
}

static inline void txopt_put(struct ipv6_txoptions *opt)
{
	if (opt && atomic_dec_and_test(&opt->refcnt))
		kfree_rcu(opt, rcu);
}

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt);
void fl6_free_socklist(struct sock *sk);
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
			   int flags);
int ip6_flowlabel_init(void);
void ip6_flowlabel_cleanup(void);

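/* Drop a "users" reference on a flow label; fl may be NULL. */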
static inline void fl6_sock_release(struct ip6_flowlabel *fl)
{
	if (fl)
		atomic_dec(&fl->users);
}

void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);

int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
			       struct icmp6hdr *thdr, int len);

int ip6_ra_control(struct sock *sk, int sel);

int ipv6_parse_hopopts(struct sk_buff *skb);

struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
					struct ipv6_txoptions *opt);
struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
					  struct ipv6_txoptions *opt,
					  int newtype,
					  struct ipv6_opt_hdr __user *newopt,
					  int newoptlen);
struct ipv6_txoptions *
ipv6_renew_options_kern(struct sock *sk,
			struct ipv6_txoptions *opt,
			int newtype,
			struct ipv6_opt_hdr *newopt,
			int newoptlen);
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
					  struct ipv6_txoptions *opt);

bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
		       const struct inet6_skb_parm *opt);
struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
					   struct ipv6_txoptions *opt);

static inline bool ipv6_accept_ra(struct inet6_dev *idev)
{
	/* If forwarding is enabled, RAs are not accepted unless the special
	 * hybrid mode (accept_ra=2) is enabled.
	 */
	return idev->cnf.forwarding ? idev->cnf.accept_ra == 2 :
	    idev->cnf.accept_ra;
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int ip6_frag_mem(struct net *net)
{
	return sum_frag_mem_limit(&net->ipv6.frags);
}
#endif

#define IPV6_FRAG_HIGH_THRESH	(4 * 1024*1024)	/* 4194304 */
#define IPV6_FRAG_LOW_THRESH	(3 * 1024*1024)	/* 3145728 */
#define IPV6_FRAG_TIMEOUT	(60 * HZ)	/* 60 seconds */

int __ipv6_addr_type(const struct in6_addr *addr);
static inline int ipv6_addr_type(const struct in6_addr *addr)
{
	return __ipv6_addr_type(addr) & 0xffff;
}

static inline int ipv6_addr_scope(const struct in6_addr *addr)
{
	return __ipv6_addr_type(addr) & IPV6_ADDR_SCOPE_MASK;
}

static inline int __ipv6_addr_src_scope(int type)
{
	return (type == IPV6_ADDR_ANY) ? __IPV6_ADDR_SCOPE_INVALID : (type >> 16);
}

static inline int ipv6_addr_src_scope(const struct in6_addr *addr)
{
	return __ipv6_addr_src_scope(__ipv6_addr_type(addr));
}

static inline bool __ipv6_addr_needs_scope_id(int type)
{
	return type & IPV6_ADDR_LINKLOCAL ||
	       (type & IPV6_ADDR_MULTICAST &&
		(type & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)));
}

static inline __u32 ipv6_iface_scope_id(const struct in6_addr *addr, int iface)
{
	return __ipv6_addr_needs_scope_id(__ipv6_addr_type(addr)) ? iface : 0;
}

static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2)
{
	return memcmp(a1, a2, sizeof(struct in6_addr));
}

static inline bool
ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
		     const struct in6_addr *a2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	const unsigned long *ul1 = (const unsigned long *)a1;
	const unsigned long *ulm = (const unsigned long *)m;
	const unsigned long *ul2 = (const unsigned long *)a2;

	return !!(((ul1[0] ^ ul2[0]) & ulm[0]) |
		  ((ul1[1] ^ ul2[1]) & ulm[1]));
#else
	return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) |
		  ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) |
		  ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) |
		  ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
#endif
}

static inline void ipv6_addr_prefix(struct in6_addr *pfx,
				    const struct in6_addr *addr,
				    int plen)
{
	/* caller must guarantee 0 <= plen <= 128 */
	int o = plen >> 3,
	    b = plen & 0x7;

	memset(pfx->s6_addr, 0, sizeof(pfx->s6_addr));
	memcpy(pfx->s6_addr, addr, o);
	if (b != 0)
		pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b);
}

static inline void ipv6_addr_prefix_copy(struct in6_addr *addr,
					 const struct in6_addr *pfx,
					 int plen)
{
	/* caller must guarantee 0 <= plen <= 128 */
	int o = plen >> 3,
	    b = plen & 0x7;

	memcpy(addr->s6_addr, pfx, o);
	if (b != 0) {
		addr->s6_addr[o] &= ~(0xff00 >> b);
		addr->s6_addr[o] |= (pfx->s6_addr[o] & (0xff00 >> b));
	}
}

static inline void __ipv6_addr_set_half(__be32 *addr,
					__be32 wh, __be32 wl)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#if defined(__BIG_ENDIAN)
	if (__builtin_constant_p(wh) && __builtin_constant_p(wl)) {
		*(__force u64 *)addr = ((__force u64)(wh) << 32 | (__force u64)(wl));
		return;
	}
#elif defined(__LITTLE_ENDIAN)
	if (__builtin_constant_p(wl) && __builtin_constant_p(wh)) {
		*(__force u64 *)addr = ((__force u64)(wl) << 32 | (__force u64)(wh));
		return;
	}
#endif
#endif
	addr[0] = wh;
	addr[1] = wl;
}

static inline void ipv6_addr_set(struct in6_addr *addr,
				 __be32 w1, __be32 w2,
				 __be32 w3, __be32 w4)
{
	__ipv6_addr_set_half(&addr->s6_addr32[0], w1, w2);
	__ipv6_addr_set_half(&addr->s6_addr32[2], w3, w4);
}

static inline bool ipv6_addr_equal(const struct in6_addr *a1,
				   const struct in6_addr *a2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	const unsigned long *ul1 = (const unsigned long *)a1;
	const unsigned long *ul2 = (const unsigned long *)a2;

	return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
#else
	return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
		(a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
		(a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
		(a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
#endif
}

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
static inline bool __ipv6_prefix_equal64_half(const __be64 *a1,
					      const __be64 *a2,
					      unsigned int len)
{
	if (len && ((*a1 ^ *a2) & cpu_to_be64((~0UL) << (64 - len))))
		return false;
	return true;
}

static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
				     const struct in6_addr *addr2,
				     unsigned int prefixlen)
{
	const __be64 *a1 = (const __be64 *)addr1;
	const __be64 *a2 = (const __be64 *)addr2;

	if (prefixlen >= 64) {
		if (a1[0] ^ a2[0])
			return false;
		return __ipv6_prefix_equal64_half(a1 + 1, a2 + 1, prefixlen - 64);
	}
	return __ipv6_prefix_equal64_half(a1, a2, prefixlen);
}
#else
static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
				     const struct in6_addr *addr2,
				     unsigned int prefixlen)
{
	const __be32 *a1 = addr1->s6_addr32;
	const __be32 *a2 = addr2->s6_addr32;
	unsigned int pdw, pbi;

	/* check complete u32 in prefix */
	pdw = prefixlen >> 5;
	if (pdw && memcmp(a1, a2, pdw << 2))
		return false;

	/* check incomplete u32 in prefix */
	pbi = prefixlen & 0x1f;
	if (pbi && ((a1[pdw] ^ a2[pdw]) & htonl((0xffffffff) << (32 - pbi))))
		return false;

	return true;
}
#endif

struct inet_frag_queue;

enum ip6_defrag_users {
	IP6_DEFRAG_LOCAL_DELIVER,
	IP6_DEFRAG_CONNTRACK_IN,
	__IP6_DEFRAG_CONNTRACK_IN	= IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP6_DEFRAG_CONNTRACK_OUT,
	__IP6_DEFRAG_CONNTRACK_OUT	= IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};

struct ip6_create_arg {
	__be32 id;
	u32 user;
	const struct in6_addr *src;
	const struct in6_addr *dst;
	int iif;
	u8 ecn;
};

void ip6_frag_init(struct inet_frag_queue *q, const void *a);
bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);

/*
 *	Equivalent of the IPv4 fragment queue (struct ipq)
 */
struct frag_queue {
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id */
	u32			user;
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	int			iif;
	unsigned int		csum;
	__u16			nhoffset;
	u8			ecn;
};

void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
			   struct inet_frags *frags);

static inline bool ipv6_addr_any(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	const unsigned long *ul = (const unsigned long *)a;

	return (ul[0] | ul[1]) == 0UL;
#else
	return (a->s6_addr32[0] | a->s6_addr32[1] |
		a->s6_addr32[2] | a->s6_addr32[3]) == 0;
#endif
}

static inline u32 ipv6_addr_hash(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	const unsigned long *ul = (const unsigned long *)a;
	unsigned long x = ul[0] ^ ul[1];

	return (u32)(x ^ (x >> 32));
#else
	return (__force u32)(a->s6_addr32[0] ^ a->s6_addr32[1] ^
			     a->s6_addr32[2] ^ a->s6_addr32[3]);
#endif
}

/* more secure version of ipv6_addr_hash() */
static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
{
	u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];

	return jhash_3words(v,
			    (__force u32)a->s6_addr32[2],
			    (__force u32)a->s6_addr32[3],
			    initval);
}

static inline bool ipv6_addr_loopback(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	const __be64 *be = (const __be64 *)a;

	return (be[0] | (be[1] ^ cpu_to_be64(1))) == 0UL;
#else
	return (a->s6_addr32[0] | a->s6_addr32[1] |
		a->s6_addr32[2] | (a->s6_addr32[3] ^ cpu_to_be32(1))) == 0;
#endif
}

/*
 * Note that we must __force cast these to unsigned long to make sparse happy,
 * since all of the endian-annotated types are fixed size regardless of arch.
 */
static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
{
	return (
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
		*(unsigned long *)a |
#else
		(__force unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) |
#endif
		(__force unsigned long)(a->s6_addr32[2] ^
					cpu_to_be32(0x0000ffff))) == 0UL;
}

/*
 * Check for an RFC 4843 ORCHID address
 * (Overlay Routable Cryptographic Hash Identifiers)
 */
static inline bool ipv6_addr_orchid(const struct in6_addr *a)
{
	return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010);
}

static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr)
{
	return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000);
}

static inline void ipv6_addr_set_v4mapped(const __be32 addr,
					  struct in6_addr *v4mapped)
{
	ipv6_addr_set(v4mapped,
			0, 0,
			htonl(0x0000FFFF),
			addr);
}

/*
 * Find the first differing bit between two addresses.
 * The address length must be a multiple of 32 bits.
 */
static inline int __ipv6_addr_diff32(const void *token1, const void *token2, int addrlen)
{
	const __be32 *a1 = token1, *a2 = token2;
	int i;

	addrlen >>= 2;

	for (i = 0; i < addrlen; i++) {
		__be32 xb = a1[i] ^ a2[i];
		if (xb)
			return i * 32 + 31 - __fls(ntohl(xb));
	}

	/*
	 *	we should *never* get to this point since that
	 *	would mean the addrs are equal
	 *
	 *	However, we do get to it 8) And exactly when
	 *	the addresses are equal 8)
	 *
	 *	ip route add 1111::/128 via ...
	 *	ip route add 1111::/64 via ...
	 *	and we are here.
	 *
	 *	Ideally, this function should stop the comparison
	 *	at the prefix length. It does not, but that is still OK
	 *	as long as the returned value is greater than the prefix length.
	 *	--ANK (980803)
	 */
	return addrlen << 5;
}

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
static inline int __ipv6_addr_diff64(const void *token1, const void *token2, int addrlen)
{
	const __be64 *a1 = token1, *a2 = token2;
	int i;

	addrlen >>= 3;

	for (i = 0; i < addrlen; i++) {
		__be64 xb = a1[i] ^ a2[i];
		if (xb)
			return i * 64 + 63 - __fls(be64_to_cpu(xb));
	}

	return addrlen << 6;
}
#endif

static inline int __ipv6_addr_diff(const void *token1, const void *token2, int addrlen)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	if (__builtin_constant_p(addrlen) && !(addrlen & 7))
		return __ipv6_addr_diff64(token1, token2, addrlen);
#endif
	return __ipv6_addr_diff32(token1, token2, addrlen);
}

static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_addr *a2)
{
	return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
}

__be32 ipv6_select_ident(struct net *net,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr);
void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);

int ip6_dst_hoplimit(struct dst_entry *dst);

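/* Hop limit for a flow: the socket's unicast or multicast hop limit if one
 * is configured (>= 0), otherwise the default taken from the route.
 */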
static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
				      struct dst_entry *dst)
{
	int hlimit;

	if (ipv6_addr_is_multicast(&fl6->daddr))
		hlimit = np->mcast_hops;
	else
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);
	return hlimit;
}

/* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v6addrs.src = iph->saddr;
 *			flow->v6addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
					    const struct ipv6hdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
		     offsetof(typeof(flow->addrs), v6addrs.src) +
		     sizeof(flow->addrs.v6addrs.src));
	memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
}

#if IS_ENABLED(CONFIG_IPV6)

/* Sysctl settings for net ipv6.auto_flowlabels */
#define IP6_AUTO_FLOW_LABEL_OFF		0
#define IP6_AUTO_FLOW_LABEL_OPTOUT	1
#define IP6_AUTO_FLOW_LABEL_OPTIN	2
#define IP6_AUTO_FLOW_LABEL_FORCED	3

#define IP6_AUTO_FLOW_LABEL_MAX		IP6_AUTO_FLOW_LABEL_FORCED

#define IP6_DEFAULT_AUTO_FLOW_LABELS	IP6_AUTO_FLOW_LABEL_OPTOUT

static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
					__be32 flowlabel, bool autolabel,
					struct flowi6 *fl6)
{
	u32 hash;

	if (flowlabel ||
	    net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
	    (!autolabel &&
	     net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED))
		return flowlabel;

	hash = skb_get_hash_flowi6(skb, fl6);

	/* Since this is being sent on the wire, obfuscate the hash a bit
	 * to minimize the possibility that any useful information is
	 * leaked to an attacker. Only the lower 20 bits are relevant.
	 */
	hash = rol32(hash, 16);

	flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;

	if (net->ipv6.sysctl.flowlabel_state_ranges)
		flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;

	return flowlabel;
}

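/* Whether sockets in this netns auto-generate flow labels by default,
 * derived from the auto_flowlabels sysctl (non-zero for the OPTOUT and
 * FORCED modes).
 */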
| 801 | static inline int ip6_default_np_autolabel(struct net *net) |
| 802 | { |
| 803 | switch (net->ipv6.sysctl.auto_flowlabels) { |
| 804 | case IP6_AUTO_FLOW_LABEL_OFF: |
| 805 | case IP6_AUTO_FLOW_LABEL_OPTIN: |
| 806 | default: |
| 807 | return 0; |
| 808 | case IP6_AUTO_FLOW_LABEL_OPTOUT: |
| 809 | case IP6_AUTO_FLOW_LABEL_FORCED: |
| 810 | return 1; |
| 811 | } |
| 812 | } |
Florian Fainelli | a37934f | 2014-07-08 11:15:03 -0700 | [diff] [blame] | 813 | #else |
| 814 | static inline void ip6_set_txhash(struct sock *sk) { } |
| 815 | static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, |
Tom Herbert | 4224090 | 2015-07-31 16:52:12 -0700 | [diff] [blame] | 816 | __be32 flowlabel, bool autolabel, |
| 817 | struct flowi6 *fl6) |
Florian Fainelli | a37934f | 2014-07-08 11:15:03 -0700 | [diff] [blame] | 818 | { |
| 819 | return flowlabel; |
| 820 | } |
Tom Herbert | 4224090 | 2015-07-31 16:52:12 -0700 | [diff] [blame] | 821 | static inline int ip6_default_np_autolabel(struct net *net) |
| 822 | { |
| 823 | return 0; |
| 824 | } |
Florian Fainelli | a37934f | 2014-07-08 11:15:03 -0700 | [diff] [blame] | 825 | #endif |
| 826 | |
Tom Herbert | cb1ce2e | 2014-07-01 21:33:10 -0700 | [diff] [blame] | 827 | |
YOSHIFUJI Hideaki | 971f359 | 2005-11-08 09:37:56 -0800 | [diff] [blame] | 828 | /* |
YOSHIFUJI Hideaki / 吉藤英明 | 3e4e4c1 | 2013-01-13 05:01:39 +0000 | [diff] [blame] | 829 | * Header manipulation |
| 830 | */ |
| 831 | static inline void ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass, |
| 832 | __be32 flowlabel) |
| 833 | { |
YOSHIFUJI Hideaki | 07f623d | 2013-01-17 12:10:57 +0900 | [diff] [blame] | 834 | *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | flowlabel; |
YOSHIFUJI Hideaki / 吉藤英明 | 3e4e4c1 | 2013-01-13 05:01:39 +0000 | [diff] [blame] | 835 | } |
| 836 | |
YOSHIFUJI Hideaki / 吉藤英明 | 6502ca5 | 2013-01-13 05:01:51 +0000 | [diff] [blame] | 837 | static inline __be32 ip6_flowinfo(const struct ipv6hdr *hdr) |
| 838 | { |
| 839 | return *(__be32 *)hdr & IPV6_FLOWINFO_MASK; |
| 840 | } |
| 841 | |
Florent Fourcot | 3308de2 | 2013-12-08 15:47:00 +0100 | [diff] [blame] | 842 | static inline __be32 ip6_flowlabel(const struct ipv6hdr *hdr) |
| 843 | { |
| 844 | return *(__be32 *)hdr & IPV6_FLOWLABEL_MASK; |
| 845 | } |
| 846 | |
Li RongQing | d76ed22 | 2014-01-15 17:03:30 +0800 | [diff] [blame] | 847 | static inline u8 ip6_tclass(__be32 flowinfo) |
| 848 | { |
| 849 | return ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT; |
| 850 | } |
Daniel Borkmann | eaa93bf | 2016-03-18 18:37:57 +0100 | [diff] [blame] | 851 | |
| 852 | static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel) |
| 853 | { |
| 854 | return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel; |
| 855 | } |
| 856 | |
YOSHIFUJI Hideaki / 吉藤英明 | 3e4e4c1 | 2013-01-13 05:01:39 +0000 | [diff] [blame] | 857 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 858 | * Prototypes exported by ipv6 |
| 859 | */ |
| 860 | |
| 861 | /* |
| 862 | * rcv function (called from netdevice level) |
| 863 | */ |
| 864 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 865 | int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, |
| 866 | struct packet_type *pt, struct net_device *orig_dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 867 | |
Eric W. Biederman | 0c4b51f | 2015-09-15 20:04:18 -0500 | [diff] [blame] | 868 | int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb); |
Patrick McHardy | b05e106 | 2006-01-06 23:03:34 -0800 | [diff] [blame] | 869 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 870 | /* |
| 871 | * upper-layer output functions |
| 872 | */ |
Eric Dumazet | 1c1e9d2 | 2015-09-25 07:39:20 -0700 | [diff] [blame] | 873 | int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 874 | struct ipv6_txoptions *opt, int tclass); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 875 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 876 | int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 877 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 878 | int ip6_append_data(struct sock *sk, |
| 879 | int getfrag(void *from, char *to, int offset, int len, |
| 880 | int odd, struct sk_buff *skb), |
Wei Wang | 26879da | 2016-05-02 21:40:07 -0700 | [diff] [blame] | 881 | void *from, int length, int transhdrlen, |
| 882 | struct ipcm6_cookie *ipc6, struct flowi6 *fl6, |
| 883 | struct rt6_info *rt, unsigned int flags, |
Soheil Hassas Yeganeh | c14ac94 | 2016-04-02 23:08:12 -0400 | [diff] [blame] | 884 | const struct sockcm_cookie *sockc); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 885 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 886 | int ip6_push_pending_frames(struct sock *sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 887 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 888 | void ip6_flush_pending_frames(struct sock *sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 889 | |
Vlad Yasevich | 6422398 | 2015-01-31 10:40:15 -0500 | [diff] [blame] | 890 | int ip6_send_skb(struct sk_buff *skb); |
| 891 | |
| 892 | struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue, |
| 893 | struct inet_cork_full *cork, |
| 894 | struct inet6_cork *v6_cork); |
| 895 | struct sk_buff *ip6_make_skb(struct sock *sk, |
| 896 | int getfrag(void *from, char *to, int offset, |
| 897 | int len, int odd, struct sk_buff *skb), |
| 898 | void *from, int length, int transhdrlen, |
Wei Wang | 26879da | 2016-05-02 21:40:07 -0700 | [diff] [blame] | 899 | struct ipcm6_cookie *ipc6, struct flowi6 *fl6, |
| 900 | struct rt6_info *rt, unsigned int flags, |
Soheil Hassas Yeganeh | c14ac94 | 2016-04-02 23:08:12 -0400 | [diff] [blame] | 901 | const struct sockcm_cookie *sockc); |
Vlad Yasevich | 6422398 | 2015-01-31 10:40:15 -0500 | [diff] [blame] | 902 | |
| 903 | static inline struct sk_buff *ip6_finish_skb(struct sock *sk) |
| 904 | { |
| 905 | return __ip6_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork, |
| 906 | &inet6_sk(sk)->cork); |
| 907 | } |
| 908 | |
Roopa Prabhu | 343d60a | 2015-07-30 13:34:53 -0700 | [diff] [blame] | 909 | int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, |
| 910 | struct flowi6 *fl6); |
Eric Dumazet | 3aef934 | 2015-09-25 07:39:12 -0700 | [diff] [blame] | 911 | struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, |
Steffen Klassert | 0e0d44a | 2013-08-28 08:04:14 +0200 | [diff] [blame] | 912 | const struct in6_addr *final_dst); |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 913 | struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, |
Steffen Klassert | 0e0d44a | 2013-08-28 08:04:14 +0200 | [diff] [blame] | 914 | const struct in6_addr *final_dst); |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 915 | struct dst_entry *ip6_blackhole_route(struct net *net, |
| 916 | struct dst_entry *orig_dst); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 917 | |
| 918 | /* |
| 919 | * skb processing functions |
| 920 | */ |
| 921 | |
Eric W. Biederman | ede2059 | 2015-10-07 16:48:47 -0500 | [diff] [blame] | 922 | int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb); |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 923 | int ip6_forward(struct sk_buff *skb); |
| 924 | int ip6_input(struct sk_buff *skb); |
| 925 | int ip6_mc_input(struct sk_buff *skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 926 | |
Eric W. Biederman | cf91a99 | 2015-10-07 16:48:45 -0500 | [diff] [blame] | 927 | int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); |
Eric W. Biederman | 33224b1 | 2015-10-07 16:48:46 -0500 | [diff] [blame] | 928 | int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); |
Herbert Xu | ef76bc2 | 2008-01-11 19:15:08 -0800 | [diff] [blame] | 929 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 930 | /* |
| 931 | * Extension header (options) processing |
| 932 | */ |
| 933 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 934 | void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, |
| 935 | u8 *proto, struct in6_addr **daddr_p); |
| 936 | void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, |
| 937 | u8 *proto); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 938 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 939 | int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp, |
| 940 | __be16 *frag_offp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 941 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 942 | bool ipv6_ext_hdr(u8 nexthdr); |
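/*
 * ipv6_ext_hdr() reports whether a protocol number is a known extension
 * header. A common pattern for locating the transport header of a
 * received packet (a sketch; assumes only the first fragment is of
 * interest and that a negative return means a malformed header chain):
 *
 *	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 *	u8 nexthdr = ip6h->nexthdr;
 *	__be16 frag_off;
 *	int thoff;
 *
 *	thoff = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(*ip6h),
 *				 &nexthdr, &frag_off);
 *	if (thoff < 0 || (frag_off & htons(IP6_OFFSET)))
 *		goto drop;
 *	... nexthdr now holds the transport protocol, thoff its offset ...
 */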
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 943 | |
Jesse Gross | f8f6267 | 2012-11-09 17:05:07 -0800 | [diff] [blame] | 944 | enum { |
Ansis Atteka | 9195bb8 | 2012-11-09 17:11:31 -0800 | [diff] [blame] | 945 | IP6_FH_F_FRAG = (1 << 0), |
| 946 | IP6_FH_F_AUTH = (1 << 1), |
| 947 | IP6_FH_F_SKIP_RH = (1 << 2), |
Jesse Gross | f8f6267 | 2012-11-09 17:05:07 -0800 | [diff] [blame] | 948 | }; |
| 949 | |
| 950 | /* find specified header and get offset to it */ |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 951 | int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target, |
| 952 | unsigned short *fragoff, int *fragflg); |
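/*
 * With target == -1, ipv6_find_hdr() walks the extension header chain and
 * returns the protocol number of the upper-layer header, storing its
 * offset from skb->data; the IP6_FH_F_* values passed via the last
 * argument tune how fragment, AH and routing headers are handled. A
 * sketch in the style of the netfilter users:
 *
 *	unsigned int thoff = 0;
 *	int tproto;
 *
 *	tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
 *	if (tproto < 0)
 *		return NF_DROP;
 *	if (tproto == IPPROTO_TCP)
 *		... TCP header starts at skb->data + thoff ...
 */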
Jesse Gross | f8f6267 | 2012-11-09 17:05:07 -0800 | [diff] [blame] | 953 | |
Huw Davies | 0868383 | 2016-06-27 15:06:15 -0400 | [diff] [blame] | 954 | int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type); |
Masahide NAKAMURA | c61a404 | 2006-08-23 19:18:35 -0700 | [diff] [blame] | 955 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 956 | struct in6_addr *fl6_update_dst(struct flowi6 *fl6, |
| 957 | const struct ipv6_txoptions *opt, |
| 958 | struct in6_addr *orig); |
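/*
 * When the socket's tx options carry a routing header, the flow
 * destination must be rewritten to the first hop before the route lookup.
 * fl6_update_dst() does that and returns a pointer to the saved final
 * destination (or NULL if there is no routing header), which is then fed
 * to ip6_dst_lookup_flow(). Sketch, as in tcp_v6_connect():
 *
 *	struct in6_addr *final_p, final;
 *
 *	final_p = fl6_update_dst(&fl6, opt, &final);
 *	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 */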
Arnaud Ebalard | 20c59de | 2010-06-01 21:35:01 +0000 | [diff] [blame] | 959 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 960 | /* |
| 961 | * socket options (ipv6_sockglue.c) |
| 962 | */ |
| 963 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 964 | int ipv6_setsockopt(struct sock *sk, int level, int optname, |
| 965 | char __user *optval, unsigned int optlen); |
| 966 | int ipv6_getsockopt(struct sock *sk, int level, int optname, |
| 967 | char __user *optval, int __user *optlen); |
| 968 | int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, |
| 969 | char __user *optval, unsigned int optlen); |
| 970 | int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, |
| 971 | char __user *optval, int __user *optlen); |
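/*
 * These are the kernel-side handlers behind setsockopt()/getsockopt() at
 * the IPPROTO_IPV6 level; the compat_* variants serve 32-bit user space
 * on 64-bit kernels. The user-space view, for illustration only:
 *
 *	int hops = 32;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_UNICAST_HOPS, &hops, sizeof(hops));
 */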
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 972 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 973 | int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); |
Hannes Frederic Sowa | 82b276c | 2014-01-20 05:16:39 +0100 | [diff] [blame] | 974 | int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr, |
| 975 | int addr_len); |
Martin KaFai Lau | 33c162a | 2016-04-11 15:29:36 -0700 | [diff] [blame] | 976 | int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr); |
Martin KaFai Lau | e646b65 | 2016-04-11 15:29:37 -0700 | [diff] [blame] | 977 | void ip6_datagram_release_cb(struct sock *sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 978 | |
Hannes Frederic Sowa | 85fbaa7 | 2013-11-23 00:46:12 +0100 | [diff] [blame] | 979 | int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, |
| 980 | int *addr_len); |
| 981 | int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len, |
| 982 | int *addr_len); |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 983 | void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, |
| 984 | u32 info, u8 *payload); |
| 985 | void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info); |
| 986 | void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu); |
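/*
 * ipv6_icmp_error() and ipv6_local_error() queue ICMPv6-originated and
 * locally generated errors on the socket's error queue;
 * ipv6_recv_error() later delivers them to MSG_ERRQUEUE reads. Datagram
 * recvmsg() handlers typically start with (sketch):
 *
 *	if (flags & MSG_ERRQUEUE)
 *		return ipv6_recv_error(sk, msg, len, addr_len);
 */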
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 987 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 988 | int inet6_release(struct socket *sock); |
| 989 | int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len); |
| 990 | int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, |
| 991 | int peer); |
| 992 | int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 993 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 994 | int inet6_hash_connect(struct inet_timewait_death_row *death_row, |
Arnaldo Carvalho de Melo | d8313f5 | 2005-12-13 23:25:44 -0800 | [diff] [blame] | 995 | struct sock *sk); |
| 996 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 997 | /* |
| 998 |  * socket ops (af_inet6.c) and multicast source filtering (mcast.c)
| 999 | */ |
Eric Dumazet | 90ddc4f | 2005-12-22 12:49:22 -0800 | [diff] [blame] | 1000 | extern const struct proto_ops inet6_stream_ops; |
| 1001 | extern const struct proto_ops inet6_dgram_ops; |
Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 1002 | |
Arnaldo Carvalho de Melo | 14c8502 | 2005-12-27 02:43:12 -0200 | [diff] [blame] | 1003 | struct group_source_req; |
| 1004 | struct group_filter; |
| 1005 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1006 | int ip6_mc_source(int add, int omode, struct sock *sk, |
| 1007 | struct group_source_req *pgsr); |
| 1008 | int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf); |
| 1009 | int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, |
| 1010 | struct group_filter __user *optval, int __user *optlen); |
Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 1011 | |
| 1012 | #ifdef CONFIG_PROC_FS |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1013 | int ac6_proc_init(struct net *net); |
| 1014 | void ac6_proc_exit(struct net *net); |
| 1015 | int raw6_proc_init(void); |
| 1016 | void raw6_proc_exit(void); |
| 1017 | int tcp6_proc_init(struct net *net); |
| 1018 | void tcp6_proc_exit(struct net *net); |
| 1019 | int udp6_proc_init(struct net *net); |
| 1020 | void udp6_proc_exit(struct net *net); |
| 1021 | int udplite6_proc_init(void); |
| 1022 | void udplite6_proc_exit(void); |
| 1023 | int ipv6_misc_proc_init(void); |
| 1024 | void ipv6_misc_proc_exit(void); |
| 1025 | int snmp6_register_dev(struct inet6_dev *idev); |
| 1026 | int snmp6_unregister_dev(struct inet6_dev *idev); |
Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 1027 | |
Herbert Xu | 7f7d9a6 | 2007-04-24 21:54:09 -0700 | [diff] [blame] | 1028 | #else |
Daniel Lezcano | 6ab57e7 | 2008-03-26 16:52:32 -0700 | [diff] [blame] | 1029 | static inline int ac6_proc_init(struct net *net) { return 0; } |
| 1030 | static inline void ac6_proc_exit(struct net *net) { } |
| 1031 | static inline int snmp6_register_dev(struct inet6_dev *idev) { return 0; } |
| 1032 | static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; } |
Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 1033 | #endif |
| 1034 | |
| 1035 | #ifdef CONFIG_SYSCTL |
Joe Perches | 9e8cda3 | 2013-06-13 19:37:53 -0700 | [diff] [blame] | 1036 | extern struct ctl_table ipv6_route_table_template[]; |
Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 1037 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1038 | struct ctl_table *ipv6_icmp_sysctl_init(struct net *net); |
| 1039 | struct ctl_table *ipv6_route_sysctl_init(struct net *net); |
| 1040 | int ipv6_sysctl_register(void); |
| 1041 | void ipv6_sysctl_unregister(void); |
Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 1042 | #endif |
| 1043 | |
Madhu Challa | 46a4dee | 2015-02-25 09:58:34 -0800 | [diff] [blame] | 1044 | int ipv6_sock_mc_join(struct sock *sk, int ifindex, |
| 1045 | const struct in6_addr *addr); |
Madhu Challa | 46a4dee | 2015-02-25 09:58:34 -0800 | [diff] [blame] | 1046 | int ipv6_sock_mc_drop(struct sock *sk, int ifindex, |
| 1047 | const struct in6_addr *addr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1048 | #endif /* _NET_IPV6_H */ |