Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | #ifndef _NET_IP6_TUNNEL_H |
| 2 | #define _NET_IP6_TUNNEL_H |
| 3 | |
| 4 | #include <linux/ipv6.h> |
| 5 | #include <linux/netdevice.h> |
Pravin B Shelar | c544193 | 2013-03-25 14:49:35 +0000 | [diff] [blame] | 6 | #include <linux/if_tunnel.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7 | #include <linux/ip6_tunnel.h> |
Pravin B Shelar | 039f506 | 2015-12-24 14:34:54 -0800 | [diff] [blame] | 8 | #include <net/ip_tunnels.h> |
Paolo Abeni | 607f725 | 2016-02-12 15:43:54 +0100 | [diff] [blame] | 9 | #include <net/dst_cache.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10 | |
xeb@mail.ru | c12b395 | 2012-08-10 00:51:50 +0000 | [diff] [blame] | 11 | #define IP6TUNNEL_ERR_TIMEO (30*HZ) |
| 12 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13 | /* capable of sending packets */ |
| 14 | #define IP6_TNL_F_CAP_XMIT 0x10000 |
| 15 | /* capable of receiving packets */ |
| 16 | #define IP6_TNL_F_CAP_RCV 0x20000 |
Ville Nuorvala | d0087b2 | 2012-06-28 18:15:52 +0000 | [diff] [blame] | 17 | /* determine capability on a per-packet basis */ |
| 18 | #define IP6_TNL_F_CAP_PER_PACKET 0x40000 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 19 | |
/* Configuration parameters for an IPv6 tunnel, as set up via netlink or
 * ioctl.  Mirrors the userspace-visible struct ip6_tnl_parm{,2} layouts.
 */
struct __ip6_tnl_parm {
	char name[IFNAMSIZ];	/* name of tunnel device */
	int link;		/* ifindex of underlying L2 interface */
	__u8 proto;		/* tunnel protocol */
	__u8 encap_limit;	/* encapsulation limit for tunnel */
	__u8 hop_limit;		/* hop limit for tunnel */
	__be32 flowinfo;	/* traffic class and flowlabel for tunnel */
	__u32 flags;		/* tunnel flags */
	struct in6_addr laddr;	/* local tunnel end-point address */
	struct in6_addr raddr;	/* remote tunnel end-point address */

	/* GRE-specific parameters (unused by plain ip6_tunnel) */
	__be16			i_flags;	/* GRE input flags (key/seq/csum) */
	__be16			o_flags;	/* GRE output flags (key/seq/csum) */
	__be32			i_key;		/* expected GRE key on receive */
	__be32			o_key;		/* GRE key inserted on transmit */
};
| 36 | |
/* IPv6 tunnel */
struct ip6_tnl {
	struct ip6_tnl __rcu *next;	/* next tunnel in list */
	struct net_device *dev;	/* virtual device associated with tunnel */
	struct net *net;	/* netns for packet i/o */
	struct __ip6_tnl_parm parms;	/* tunnel configuration parameters */
	struct flowi fl;	/* flowi template for xmit */
	struct dst_cache dst_cache;	/* cached dst */
	struct gro_cells gro_cells;	/* per-cpu GRO receive queues */

	int err_count;		/* number of ICMP errors seen since err_time */
	unsigned long err_time;	/* time (jiffies) of last ICMP error */

	/* These fields used only by GRE */
	__u32 i_seqno;	/* The last seen seqno */
	__u32 o_seqno;	/* The last output seqno */
	int hlen;       /* tun_hlen + encap_hlen */
	int tun_hlen;	/* Precalculated header length */
	int encap_hlen; /* Encap header length (FOU,GUE) */
	struct ip_tunnel_encap encap;	/* encapsulation (FOU/GUE) config */
	int mlink;	/* ifindex of multicast link joined for rx */
};
| 59 | |
/* Per-encap-type callbacks registered in ip6tun_encaps[] (see
 * ip6_tnl_encap_add_ops()); dispatched by ip6_encap_hlen() and
 * ip6_tnl_encap() below.
 */
struct ip6_tnl_encap_ops {
	/* Return the header length this encapsulation adds for config @e. */
	size_t (*encap_hlen)(struct ip_tunnel_encap *e);
	/* Push the encapsulation header onto @skb; may update *protocol
	 * and @fl6 for the outer packet.  Returns 0 or a negative errno.
	 */
	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
			    u8 *protocol, struct flowi6 *fl6);
};
| 65 | |
Arnd Bergmann | 9791d8e | 2016-05-25 16:50:45 +0200 | [diff] [blame] | 66 | #ifdef CONFIG_INET |
| 67 | |
Tom Herbert | 058214a | 2016-05-18 09:06:17 -0700 | [diff] [blame] | 68 | extern const struct ip6_tnl_encap_ops __rcu * |
| 69 | ip6tun_encaps[MAX_IPTUN_ENCAP_OPS]; |
| 70 | |
| 71 | int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops, |
| 72 | unsigned int num); |
| 73 | int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops, |
| 74 | unsigned int num); |
| 75 | int ip6_tnl_encap_setup(struct ip6_tnl *t, |
| 76 | struct ip_tunnel_encap *ipencap); |
| 77 | |
| 78 | static inline int ip6_encap_hlen(struct ip_tunnel_encap *e) |
| 79 | { |
| 80 | const struct ip6_tnl_encap_ops *ops; |
| 81 | int hlen = -EINVAL; |
| 82 | |
| 83 | if (e->type == TUNNEL_ENCAP_NONE) |
| 84 | return 0; |
| 85 | |
| 86 | if (e->type >= MAX_IPTUN_ENCAP_OPS) |
| 87 | return -EINVAL; |
| 88 | |
| 89 | rcu_read_lock(); |
| 90 | ops = rcu_dereference(ip6tun_encaps[e->type]); |
| 91 | if (likely(ops && ops->encap_hlen)) |
| 92 | hlen = ops->encap_hlen(e); |
| 93 | rcu_read_unlock(); |
| 94 | |
| 95 | return hlen; |
| 96 | } |
| 97 | |
| 98 | static inline int ip6_tnl_encap(struct sk_buff *skb, struct ip6_tnl *t, |
| 99 | u8 *protocol, struct flowi6 *fl6) |
| 100 | { |
| 101 | const struct ip6_tnl_encap_ops *ops; |
| 102 | int ret = -EINVAL; |
| 103 | |
| 104 | if (t->encap.type == TUNNEL_ENCAP_NONE) |
| 105 | return 0; |
| 106 | |
| 107 | if (t->encap.type >= MAX_IPTUN_ENCAP_OPS) |
| 108 | return -EINVAL; |
| 109 | |
| 110 | rcu_read_lock(); |
| 111 | ops = rcu_dereference(ip6tun_encaps[t->encap.type]); |
| 112 | if (likely(ops && ops->build_header)) |
| 113 | ret = ops->build_header(skb, &t->encap, protocol, fl6); |
| 114 | rcu_read_unlock(); |
| 115 | |
| 116 | return ret; |
| 117 | } |
| 118 | |
/* Tunnel encapsulation limit destination sub-option */

/* Wire format of the Tunnel Encapsulation Limit option carried in an
 * IPv6 destination options header (option format per RFC 2473); parsed
 * by ip6_tnl_parse_tlv_enc_lim().
 */
struct ipv6_tlv_tnl_enc_lim {
	__u8 type;		/* type-code for option */
	__u8 length;		/* option length */
	__u8 encap_limit;	/* tunnel encapsulation limit */
} __packed;
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 126 | |
xeb@mail.ru | c12b395 | 2012-08-10 00:51:50 +0000 | [diff] [blame] | 127 | int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, |
| 128 | const struct in6_addr *raddr); |
Tom Herbert | 0d3c703 | 2016-04-29 17:12:15 -0700 | [diff] [blame] | 129 | int ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb, |
| 130 | const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst, |
| 131 | bool log_ecn_error); |
Steffen Klassert | d500514 | 2014-11-05 08:02:48 +0100 | [diff] [blame] | 132 | int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, |
| 133 | const struct in6_addr *raddr); |
Tom Herbert | 8eb30be | 2016-04-29 17:12:18 -0700 | [diff] [blame] | 134 | int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, |
| 135 | struct flowi6 *fl6, int encap_limit, __u32 *pmtu, __u8 proto); |
xeb@mail.ru | c12b395 | 2012-08-10 00:51:50 +0000 | [diff] [blame] | 136 | __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw); |
| 137 | __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr, |
| 138 | const struct in6_addr *raddr); |
Nicolas Dichtel | 1728d4f | 2015-01-15 15:11:17 +0100 | [diff] [blame] | 139 | struct net *ip6_tnl_get_link_net(const struct net_device *dev); |
Nicolas Dichtel | ecf2c06 | 2015-04-02 17:07:01 +0200 | [diff] [blame] | 140 | int ip6_tnl_get_iflink(const struct net_device *dev); |
Tom Herbert | 79ecb90 | 2016-04-29 17:12:20 -0700 | [diff] [blame] | 141 | int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu); |
xeb@mail.ru | c12b395 | 2012-08-10 00:51:50 +0000 | [diff] [blame] | 142 | |
David Miller | 79b16aa | 2015-04-05 22:19:09 -0400 | [diff] [blame] | 143 | static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, |
| 144 | struct net_device *dev) |
Cong Wang | e8f72ea | 2013-03-09 23:00:39 +0000 | [diff] [blame] | 145 | { |
Cong Wang | e8f72ea | 2013-03-09 23:00:39 +0000 | [diff] [blame] | 146 | int pkt_len, err; |
| 147 | |
Nicolas Dichtel | 83cf9a2 | 2015-09-18 11:47:41 +0200 | [diff] [blame] | 148 | pkt_len = skb->len - skb_inner_network_offset(skb); |
Eric W. Biederman | 33224b1 | 2015-10-07 16:48:46 -0500 | [diff] [blame] | 149 | err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); |
Pravin B Shelar | 039f506 | 2015-12-24 14:34:54 -0800 | [diff] [blame] | 150 | if (unlikely(net_xmit_eval(err))) |
| 151 | pkt_len = -1; |
| 152 | iptunnel_xmit_stats(dev, pkt_len); |
Cong Wang | e8f72ea | 2013-03-09 23:00:39 +0000 | [diff] [blame] | 153 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 154 | #endif |
Arnd Bergmann | 0efeff2 | 2016-01-01 13:18:48 +0100 | [diff] [blame] | 155 | #endif |