/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_UDP_TUNNEL_H
#define __NET_UDP_TUNNEL_H

#include <net/ip_tunnels.h>
#include <net/udp.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

struct udp_port_cfg {
	u8			family;

	/* Used only for kernel-created sockets */
	union {
		struct in_addr		local_ip;
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr		local_ip6;
#endif
	};

	union {
		struct in_addr		peer_ip;
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr		peer_ip6;
#endif
	};

	__be16			local_udp_port;
	__be16			peer_udp_port;
	unsigned int		use_udp_checksums:1,
				use_udp6_tx_checksums:1,
				use_udp6_rx_checksums:1,
				ipv6_v6only:1;
};

int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp);

#if IS_ENABLED(CONFIG_IPV6)
int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp);
#else
static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
				   struct socket **sockp)
{
	return 0;
}
#endif

static inline int udp_sock_create(struct net *net,
				  struct udp_port_cfg *cfg,
				  struct socket **sockp)
{
	if (cfg->family == AF_INET)
		return udp_sock_create4(net, cfg, sockp);

	if (cfg->family == AF_INET6)
		return udp_sock_create6(net, cfg, sockp);

	return -EPFNOSUPPORT;
}

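/*
 * Example (illustrative sketch, not part of this header): a tunnel driver
 * typically fills in a struct udp_port_cfg and lets udp_sock_create()
 * dispatch to the address-family specific variant.  The port number (4789,
 * the IANA VXLAN port) and the surrounding error handling are assumptions
 * made for the example; the socket is later released with
 * udp_tunnel_sock_release().
 *
 *	struct udp_port_cfg udp_conf;
 *	struct socket *sock;
 *	int err;
 *
 *	memset(&udp_conf, 0, sizeof(udp_conf));
 *	udp_conf.family = AF_INET;
 *	udp_conf.local_udp_port = htons(4789);
 *
 *	err = udp_sock_create(net, &udp_conf, &sock);
 *	if (err < 0)
 *		return err;
 */
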
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk,
						     struct sk_buff **head,
						     struct sk_buff *skb);
typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
					 int nhoff);

struct udp_tunnel_sock_cfg {
	void *sk_user_data;	/* user data used by the encap_rcv callback */
	/* Used for setting up udp_sock fields, see udp.h for details */
	__u8  encap_type;
	udp_tunnel_encap_rcv_t encap_rcv;
	udp_tunnel_encap_destroy_t encap_destroy;
	udp_tunnel_gro_receive_t gro_receive;
	udp_tunnel_gro_complete_t gro_complete;
};

/* Set up the given (UDP) sock to receive UDP encapsulated packets */
void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
			   struct udp_tunnel_sock_cfg *sock_cfg);

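/*
 * Example (illustrative sketch, not part of this header): once the socket
 * from udp_sock_create() exists, the driver describes its encapsulation in
 * a struct udp_tunnel_sock_cfg and hands both to setup_udp_tunnel_sock().
 * The names my_sock_priv and my_encap_rcv are hypothetical; the encap_rcv
 * callback returns 0 once it has consumed the skb, or a positive value to
 * let the packet continue up the regular UDP path.
 *
 *	struct udp_tunnel_sock_cfg tunnel_cfg;
 *
 *	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
 *	tunnel_cfg.sk_user_data = my_sock_priv;
 *	tunnel_cfg.encap_type = 1;
 *	tunnel_cfg.encap_rcv = my_encap_rcv;
 *	tunnel_cfg.encap_destroy = NULL;
 *	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
 */
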
/* -- List of parsable UDP tunnel types --
 *
 * Adding to this list will result in serious debate.  The main issue is
 * that this list is essentially a list of workarounds for either poorly
 * designed tunnels or poorly designed device offloads.
 *
 * The parsing supported via these types should be used for Rx traffic only,
 * as the network stack will have already inserted offsets for the location
 * of the headers in the skb.  In addition, any ports that are pushed should
 * be kept within the namespace without leaking to other devices such as VFs
 * or other ports on the same device.
 *
 * It is strongly encouraged to use CHECKSUM_COMPLETE for Rx to avoid the
 * need to use these types for Rx checksum offload.  It should not be
 * necessary to use them to perform Tx offloads on outgoing traffic.
 */
enum udp_parsable_tunnel_type {
	UDP_TUNNEL_TYPE_VXLAN,		/* RFC 7348 */
	UDP_TUNNEL_TYPE_GENEVE,		/* draft-ietf-nvo3-geneve */
	UDP_TUNNEL_TYPE_VXLAN_GPE,	/* draft-ietf-nvo3-vxlan-gpe */
};

struct udp_tunnel_info {
	unsigned short type;
	sa_family_t sa_family;
	__be16 port;
};

/* Notify network devices of offloadable types */
void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type);
void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type);
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type);
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);

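/*
 * Example (illustrative sketch, not part of this header): on the consuming
 * side a NIC driver receives these announcements through its
 * .ndo_udp_tunnel_add/.ndo_udp_tunnel_del callbacks and can program
 * ti->port (network byte order) into its hardware parser.  Drivers usually
 * also call udp_tunnel_get_rx_info() from ndo_open so that ports added
 * before the device came up are replayed.  my_ndo_udp_tunnel_add() and
 * my_hw_add_vxlan_port() below are hypothetical driver functions.
 *
 *	static void my_ndo_udp_tunnel_add(struct net_device *dev,
 *					  struct udp_tunnel_info *ti)
 *	{
 *		if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
 *			return;
 *		my_hw_add_vxlan_port(dev, ti->sa_family, ti->port);
 *	}
 */
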
static inline void udp_tunnel_get_rx_info(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
}

static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
}

/* Transmit the skb using UDP encapsulation. */
void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
			 __be32 src, __be32 dst, __u8 tos, __u8 ttl,
			 __be16 df, __be16 src_port, __be16 dst_port,
			 bool xnet, bool nocheck);

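/*
 * Example (illustrative sketch, not part of this header): on transmit the
 * driver builds its encapsulation header, picks an outer source port (for
 * instance with udp_flow_src_port() from net/udp.h) and hands the skb plus
 * the routed dst to udp_tunnel_xmit_skb(), which pushes the outer UDP and
 * IPv4 headers.  rt, sock, saddr, daddr, tos, ttl and dst_port below are
 * assumed to come from the driver's own route lookup and configuration.
 *
 *	__be16 src_port;
 *
 *	src_port = udp_flow_src_port(net, skb, 0, 0, true);
 *	udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, daddr, tos, ttl,
 *			    htons(IP_DF), src_port, dst_port,
 *			    false, false);
 */
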
#if IS_ENABLED(CONFIG_IPV6)
int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
			 struct sk_buff *skb,
			 struct net_device *dev, struct in6_addr *saddr,
			 struct in6_addr *daddr,
			 __u8 prio, __u8 ttl, __be32 label,
			 __be16 src_port, __be16 dst_port, bool nocheck);
#endif

void udp_tunnel_sock_release(struct socket *sock);

struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
				    __be16 flags, __be64 tunnel_id,
				    int md_size);

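/*
 * Example (illustrative sketch, not part of this header): tunnels running in
 * collect-metadata mode call udp_tun_rx_dst() from their encap_rcv handler
 * to build a metadata dst describing the outer headers, then attach it to
 * the skb before handing the packet up.  The tunnel_id value below stands in
 * for whatever identifier the driver parsed out of its own header.
 *
 *	struct metadata_dst *tun_dst;
 *
 *	tun_dst = udp_tun_rx_dst(skb, AF_INET, TUNNEL_KEY, tunnel_id, 0);
 *	if (!tun_dst)
 *		goto drop;
 *	skb_dst_set(skb, &tun_dst->dst);
 */
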
#ifdef CONFIG_INET
static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
{
	int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;

	return iptunnel_handle_offloads(skb, type);
}
#endif

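/*
 * Example (illustrative sketch, not part of this header): on the transmit
 * path this is called before the driver pushes its own encapsulation header,
 * so the GSO layer knows whether the outer UDP checksum will be filled in.
 * udp_sum and the free_dst label are assumptions of the surrounding driver
 * code.
 *
 *	err = udp_tunnel_handle_offloads(skb, udp_sum);
 *	if (err)
 *		goto free_dst;
 */
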
static inline void udp_tunnel_encap_enable(struct socket *sock)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sock->sk->sk_family == PF_INET6)
		ipv6_stub->udpv6_encap_enable();
	else
#endif
		udp_encap_enable();
}

#endif