blob: 10cce0dd4450353410e55d2b96ffeab6921b1759 [file] [log] [blame]
Tom Herbert8024e022014-07-13 19:49:37 -07001#ifndef __NET_UDP_TUNNEL_H
2#define __NET_UDP_TUNNEL_H
3
Andy Zhou6a93cc92014-09-16 17:31:17 -07004#include <net/ip_tunnels.h>
5#include <net/udp.h>
6
7#if IS_ENABLED(CONFIG_IPV6)
8#include <net/ipv6.h>
9#include <net/addrconf.h>
10#endif
11
/* Parameters for creating a kernel UDP tunnel socket; consumed by
 * udp_sock_create4()/udp_sock_create6() via udp_sock_create().
 */
struct udp_port_cfg {
	u8 family;		/* AF_INET or AF_INET6; selects the union members below */

	/* Used only for kernel-created sockets */
	union {
		struct in_addr local_ip;	/* local bind address (family == AF_INET) */
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr local_ip6;	/* local bind address (family == AF_INET6) */
#endif
	};

	union {
		struct in_addr peer_ip;		/* optional peer address (family == AF_INET) */
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr peer_ip6;	/* optional peer address (family == AF_INET6) */
#endif
	};

	__be16 local_udp_port;	/* local port, network byte order */
	__be16 peer_udp_port;	/* peer port, network byte order */
	unsigned int use_udp_checksums:1,	/* enable UDP checksums (IPv4) */
		     use_udp6_tx_checksums:1,	/* enable Tx checksums (IPv6) */
		     use_udp6_rx_checksums:1,	/* require Rx checksums (IPv6) */
		     ipv6_v6only:1;		/* restrict IPv6 socket to v6 traffic only */
};
37
Andy Zhoufd384412014-09-16 17:31:16 -070038int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
39 struct socket **sockp);
40
41#if IS_ENABLED(CONFIG_IPV6)
42int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
43 struct socket **sockp);
44#else
45static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
46 struct socket **sockp)
47{
48 return 0;
49}
50#endif
51
52static inline int udp_sock_create(struct net *net,
53 struct udp_port_cfg *cfg,
54 struct socket **sockp)
55{
56 if (cfg->family == AF_INET)
57 return udp_sock_create4(net, cfg, sockp);
58
59 if (cfg->family == AF_INET6)
60 return udp_sock_create6(net, cfg, sockp);
61
62 return -EPFNOSUPPORT;
63}
Tom Herbert8024e022014-07-13 19:49:37 -070064
/* Per-packet receive hook for the tunnel socket; return-value semantics
 * follow the UDP encap_rcv contract (see udp.h). */
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
/* Invoked when the tunnel socket is torn down. */
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
/* GRO receive/complete hooks so the tunnel can participate in
 * UDP GRO aggregation on its socket. */
typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk,
						     struct sk_buff **head,
						     struct sk_buff *skb);
typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
					 int nhoff);
Andy Zhou6a93cc92014-09-16 17:31:17 -070072
/* Callback/encap configuration applied to an existing UDP socket by
 * setup_udp_tunnel_sock().
 */
struct udp_tunnel_sock_cfg {
	void *sk_user_data;	/* user data used by encap_rcv call back */
	/* Used for setting up udp_sock fields, see udp.h for details */
	__u8 encap_type;
	udp_tunnel_encap_rcv_t encap_rcv;	/* per-packet receive hook */
	udp_tunnel_encap_destroy_t encap_destroy;	/* socket teardown hook */
	udp_tunnel_gro_receive_t gro_receive;	/* optional GRO receive hook */
	udp_tunnel_gro_complete_t gro_complete;	/* optional GRO complete hook */
};
82
83/* Setup the given (UDP) sock to receive UDP encapsulated packets */
84void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
85 struct udp_tunnel_sock_cfg *sock_cfg);
86
Alexander Duycke7b3db52016-06-16 12:20:52 -070087/* -- List of parsable UDP tunnel types --
88 *
89 * Adding to this list will result in serious debate. The main issue is
90 * that this list is essentially a list of workarounds for either poorly
91 * designed tunnels, or poorly designed device offloads.
92 *
93 * The parsing supported via these types should really be used for Rx
94 * traffic only as the network stack will have already inserted offsets for
95 * the location of the headers in the skb. In addition any ports that are
96 * pushed should be kept within the namespace without leaking to other
97 * devices such as VFs or other ports on the same device.
98 *
99 * It is strongly encouraged to use CHECKSUM_COMPLETE for Rx to avoid the
100 * need to use this for Rx checksum offload. It should not be necessary to
101 * call this function to perform Tx offloads on outgoing traffic.
102 */
/* Tunnel types a device may be asked to parse for Rx offload; see the
 * caveats in the comment block above before adding entries. */
enum udp_parsable_tunnel_type {
	UDP_TUNNEL_TYPE_VXLAN,		/* RFC 7348 */
	UDP_TUNNEL_TYPE_GENEVE,		/* draft-ietf-nvo3-geneve */
	UDP_TUNNEL_TYPE_VXLAN_GPE,	/* draft-ietf-nvo3-vxlan-gpe */
};
108
/* Describes one offloadable UDP tunnel port as advertised to drivers
 * through the push/drop notifiers below.
 */
struct udp_tunnel_info {
	unsigned short type;	/* a udp_parsable_tunnel_type value */
	sa_family_t sa_family;	/* address family of the listening socket */
	__be16 port;		/* UDP port, network byte order */
};
114
115/* Notify network devices of offloadable types */
116void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
117 unsigned short type);
Sabrina Dubroca296d8ee2017-07-21 12:49:30 +0200118void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
119 unsigned short type);
Alexander Duycke7b3db52016-06-16 12:20:52 -0700120void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type);
121void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);
122
/* Ask registered tunnels (via the netdevice notifier chain) to replay
 * their offloadable UDP ports to @dev. Caller must hold RTNL.
 */
static inline void udp_tunnel_get_rx_info(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
}
128
/* Counterpart of udp_tunnel_get_rx_info(): notify that @dev should drop
 * its offloaded UDP tunnel port state. Caller must hold RTNL.
 */
static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
}
134
Andy Zhou6a93cc92014-09-16 17:31:17 -0700135/* Transmit the skb using UDP encapsulation. */
Pravin B Shelar039f5062015-12-24 14:34:54 -0800136void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
137 __be32 src, __be32 dst, __u8 tos, __u8 ttl,
138 __be16 df, __be16 src_port, __be16 dst_port,
139 bool xnet, bool nocheck);
Andy Zhou6a93cc92014-09-16 17:31:17 -0700140
141#if IS_ENABLED(CONFIG_IPV6)
David Miller79b16aa2015-04-05 22:19:09 -0400142int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
143 struct sk_buff *skb,
Tom Herbertd998f8e2015-01-20 11:23:04 -0800144 struct net_device *dev, struct in6_addr *saddr,
145 struct in6_addr *daddr,
Daniel Borkmann13461142016-03-09 03:00:02 +0100146 __u8 prio, __u8 ttl, __be32 label,
147 __be16 src_port, __be16 dst_port, bool nocheck);
Andy Zhou6a93cc92014-09-16 17:31:17 -0700148#endif
149
150void udp_tunnel_sock_release(struct socket *sock);
151
Pravin B Shelarc29a70d2015-08-26 23:46:50 -0700152struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
153 __be16 flags, __be64 tunnel_id,
154 int md_size);
155
Alexander Duyck86a98052016-06-16 12:20:44 -0700156#ifdef CONFIG_INET
Alexander Duyckaed069d2016-04-14 15:33:37 -0400157static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
Andy Zhou6a93cc92014-09-16 17:31:17 -0700158{
159 int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
160
Edward Cree6fa79662016-02-11 21:02:31 +0000161 return iptunnel_handle_offloads(skb, type);
Andy Zhou6a93cc92014-09-16 17:31:17 -0700162}
Alexander Duyck86a98052016-06-16 12:20:44 -0700163#endif
Andy Zhou6a93cc92014-09-16 17:31:17 -0700164
/* Enable UDP encapsulation processing for @sock's family: the IPv6 path
 * goes through ipv6_stub (ipv6 may be modular), IPv4 directly through
 * udp_encap_enable().
 */
static inline void udp_tunnel_encap_enable(struct socket *sock)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sock->sk->sk_family == PF_INET6)
		ipv6_stub->udpv6_encap_enable();
	else
#endif
		udp_encap_enable();
}
174
Tom Herbert8024e022014-07-13 19:49:37 -0700175#endif