/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen :	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg :	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *		Vitaly E. Lavrov :	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi :	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi :	sendfile() on UDP works now.
 *		Arnaldo C. Melo :	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "UDP: " fmt

#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>

struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

int sysctl_udp_rmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_rmem_min);

int sysctl_udp_wmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_wmem_min);

atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)

static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk,
			       int (*saddr_comp)(const struct sock *sk1,
						 const struct sock *sk2,
						 bool match_wildcard),
			       unsigned int log)
{
	struct sock *sk2;
	struct hlist_nulls_node *node;
	kuid_t uid = sock_i_uid(sk);

	sk_nulls_for_each(sk2, node, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    (!sk2->sk_reuseport || !sk->sk_reuseport ||
		     rcu_access_pointer(sk->sk_reuseport_cb) ||
		     !uid_eq(uid, sock_i_uid(sk2))) &&
		    saddr_comp(sk, sk2, true)) {
			if (!bitmap)
				return 1;
			__set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap);
		}
	}
	return 0;
}

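/*
 * Illustrative sketch, not part of the original file: within one
 * namespace, ports that share a primary hash chain are congruent
 * modulo the table size, so "udp_port_hash >> log" above maps each
 * port on the chain to a distinct bit of the PORTS_PER_CHAIN bitmap.
 * A minimal userspace model (EX_LOG and the sample ports are
 * hypothetical, and the real slot also mixes in net_hash_mix()):
 */
#if 0	/* example only, never compiled */
#include <assert.h>

#define EX_LOG   8			/* 256 hash slots */
#define EX_SLOTS (1 << EX_LOG)

static unsigned int ex_slot(unsigned short port)
{
	return port & (EX_SLOTS - 1);	/* simplified primary hash */
}

static unsigned int ex_bit(unsigned short port)
{
	return port >> EX_LOG;		/* bit index within one chain */
}

int main(void)
{
	/* 53, 309 (= 53 + 256) and 565 share a chain, distinct bits */
	assert(ex_slot(53) == ex_slot(309) && ex_slot(309) == ex_slot(565));
	assert(ex_bit(53) == 0 && ex_bit(309) == 1 && ex_bit(565) == 2);
	return 0;
}
#endif
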
/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk,
				int (*saddr_comp)(const struct sock *sk1,
						  const struct sock *sk2,
						  bool match_wildcard))
{
	struct sock *sk2;
	struct hlist_nulls_node *node;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, node, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    (!sk2->sk_reuseport || !sk->sk_reuseport ||
		     rcu_access_pointer(sk->sk_reuseport_cb) ||
		     !uid_eq(uid, sock_i_uid(sk2))) &&
		    saddr_comp(sk, sk2, true)) {
			res = 1;
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}

static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot,
				  int (*saddr_same)(const struct sock *sk1,
						    const struct sock *sk2,
						    bool match_wildcard))
{
	struct net *net = sock_net(sk);
	struct hlist_nulls_node *node;
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_nulls_for_each(sk2, node, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    (*saddr_same)(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2);
		}
	}

	/* Initial allocation may have already happened via setsockopt */
	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return reuseport_alloc(sk);
	return 0;
}

/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     int (*saddr_comp)(const struct sock *sk1,
				       const struct sock *sk2,
				       bool match_wildcard),
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int    error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		int low, high, remaining;
		unsigned int rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		rand = prandom_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    saddr_comp, udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * gives us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2,
						     sk, saddr_comp);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk, saddr_comp);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
					saddr_comp, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot, saddr_comp)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_nulls_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					 &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);

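/*
 * Illustrative sketch, not part of the original file: the unnamed-port
 * search above steps snum by an odd multiple of the table size, which
 * stays on one hash chain while visiting every port of that chain
 * exactly once, because the stride's odd factor is coprime to the
 * power-of-two port space. A userspace model with hypothetical sizes:
 */
#if 0	/* example only, never compiled */
#include <stdbool.h>
#include <stdio.h>

#define EX_PORTS 65536u
#define EX_SLOTS 256u	/* udptable->mask + 1, a power of two */

int main(void)
{
	unsigned int rand = 0x1234u;
	unsigned int step = (rand | 1) * EX_SLOTS;	/* odd multiple */
	unsigned short first = 40000, snum = first;
	bool seen[EX_PORTS / EX_SLOTS] = { false };
	unsigned int visited = 0;

	do {
		if (!seen[snum / EX_SLOTS]) {
			seen[snum / EX_SLOTS] = true;
			visited++;
		}
		snum += step;	/* wraps modulo 65536 */
	} while (snum != first);

	/* prints "visited 256 of 256": full coverage of the chain */
	printf("visited %u of %u\n", visited, EX_PORTS / EX_SLOTS);
	return 0;
}
#endif
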
/* match_wildcard == true:  0.0.0.0 matches any IPv4 address
 * match_wildcard == false: addresses must be exactly the same, i.e.
 *                          0.0.0.0 only matches 0.0.0.0
 */
static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2,
				bool match_wildcard)
{
	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);

	if (!ipv6_only_sock(sk2)) {
		if (inet1->inet_rcv_saddr == inet2->inet_rcv_saddr)
			return 1;
		if (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr)
			return match_wildcard;
	}
	return 0;
}

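/*
 * Illustrative sketch, not part of the original file: a userspace model
 * of the comparison above, showing the two match_wildcard modes. The
 * sample addresses are hypothetical; bind() conflict detection passes
 * true, SO_REUSEPORT group matching passes false.
 */
#if 0	/* example only, never compiled */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static int ex_saddr_equal(uint32_t a, uint32_t b, bool match_wildcard)
{
	if (a == b)		/* covers 0.0.0.0 vs 0.0.0.0 too */
		return 1;
	if (!a || !b)		/* exactly one side is INADDR_ANY */
		return match_wildcard;
	return 0;
}

int main(void)
{
	uint32_t any = 0, a = 0x0a000001, b = 0x0a000002; /* 10.0.0.1/.2 */

	assert(ex_saddr_equal(a, a, false) == 1);
	assert(ex_saddr_equal(a, any, true) == 1);  /* conflicts with ANY */
	assert(ex_saddr_equal(a, any, false) == 0); /* not the same group */
	assert(ex_saddr_equal(a, b, true) == 0);
	return 0;
}
#endif
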
static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
			      unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
}

static inline int compute_score(struct sock *sk, struct net *net,
				__be32 saddr, unsigned short hnum, __be16 sport,
				__be32 daddr, __be16 dport, int dif)
{
	int score;
	struct inet_sock *inet;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;
	inet = inet_sk(sk);

	if (inet->inet_rcv_saddr) {
		if (inet->inet_rcv_saddr != daddr)
			return -1;
		score += 4;
	}

	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	if (sk->sk_bound_dev_if) {
		if (sk->sk_bound_dev_if != dif)
			return -1;
		score += 4;
	}
	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;
	return score;
}

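/*
 * Worked example, not part of the original file: the additive scoring
 * above makes a fully-matched connected socket outrank any wildcard
 * listener for the same datagram (the addresses are hypothetical):
 *
 *   listener bound to 0.0.0.0:53, AF_INET:
 *       base 2, nothing else set                  -> score 2
 *   socket 10.0.0.1:53 connected to 10.0.0.2:5353, AF_INET:
 *       base 2 + rcv_saddr 4 + daddr 4 + dport 4  -> score 14
 *
 * so traffic of the connected flow is delivered to the second socket,
 * and the listener only receives datagrams from other peers.
 */
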
/*
 * In this second variant, we check (daddr, dport) matches (inet_rcv_saddr, inet_num)
 */
static inline int compute_score2(struct sock *sk, struct net *net,
				 __be32 saddr, __be16 sport,
				 __be32 daddr, unsigned int hnum, int dif)
{
	int score;
	struct inet_sock *inet;

	if (!net_eq(sock_net(sk), net) ||
	    ipv6_only_sock(sk))
		return -1;

	inet = inet_sk(sk);

	if (inet->inet_rcv_saddr != daddr ||
	    inet->inet_num != hnum)
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;

	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	if (sk->sk_bound_dev_if) {
		if (sk->sk_bound_dev_if != dif)
			return -1;
		score += 4;
	}

	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;

	return score;
}

static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum, int dif,
				     struct udp_hslot *hslot2, unsigned int slot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

begin:
	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
		score = compute_score2(sk, net, saddr, sport,
				       daddr, hnum, dif);
		if (score > badness) {
			result = sk;
			badness = score;
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				struct sock *sk2;
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				sk2 = reuseport_select_sock(sk, hash, skb,
							    sizeof(struct udphdr));
				if (sk2) {
					result = sk2;
					goto found;
				}
				matches = 1;
			}
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot2)
		goto begin;
	if (result) {
found:
		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
			result = NULL;
		else if (unlikely(compute_score2(result, net, saddr, sport,
						 daddr, hnum, dif) < badness)) {
			sock_put(result);
			goto begin;
		}
	}
	return result;
}

/* UDP is nearly always wildcarded out the wazoo, so it makes no sense
 * to try harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport,
		int dif, struct udp_table *udptable, struct sk_buff *skb)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	rcu_read_lock();
	if (hslot->count > 10) {
		hash2 = udp4_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp4_lib_lookup2(net, saddr, sport,
					  daddr, hnum, dif,
					  hslot2, slot2, skb);
		if (!result) {
			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
			slot2 = hash2 & udptable->mask;
			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

			result = udp4_lib_lookup2(net, saddr, sport,
						  htonl(INADDR_ANY), hnum, dif,
						  hslot2, slot2, skb);
		}
		rcu_read_unlock();
		return result;
	}
begin:
	result = NULL;
	badness = 0;
	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
		score = compute_score(sk, net, saddr, hnum, sport,
				      daddr, dport, dif);
		if (score > badness) {
			result = sk;
			badness = score;
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				struct sock *sk2;
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				sk2 = reuseport_select_sock(sk, hash, skb,
							    sizeof(struct udphdr));
				if (sk2) {
					result = sk2;
					goto found;
				}
				matches = 1;
			}
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;

	if (result) {
found:
		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
			result = NULL;
		else if (unlikely(compute_score(result, net, saddr, hnum, sport,
						daddr, dport, dif) < badness)) {
			sock_put(result);
			goto begin;
		}
	}
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);

static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 udptable, skb);
}

struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif,
				 &udp_table, NULL);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);

static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
		return false;
	return true;
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex, udptable,
			       NULL);
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 * RFC1122: OK.  Passes ICMP errors back to application, as per
	 * 4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	sock_put(sk);
}

void udp_err(struct sk_buff *skb, u32 info)
{
	__udp4_lib_err(skb, info, &udp_table);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);

/**
 * udp4_hwcsum  -  handle outgoing HW checksumming
 * @skb:  sk_buff containing the filled-in UDP header
 *        (checksum field must be zeroed out)
 * @src:  source IP address
 * @dst:  destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		skb_walk_frags(skb, frags) {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		}

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);

/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck)
		uh->check = 0;
	else if (skb_is_gso(skb))
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	else if (skb_dst(skb) && skb_dst(skb)->dev &&
		 (skb_dst(skb)->dev->features &
		  (NETIF_F_IP_CSUM | NETIF_F_HW_CSUM))) {

		BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	} else {
		__wsum csum;

		BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);

		uh->check = 0;
		csum = skb_checksum(skb, 0, len, 0);
		uh->check = udp_v4_check(len, saddr, daddr, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
EXPORT_SYMBOL(udp_set_csum);

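/*
 * Worked example, not part of the original file: RFC 768 reserves an
 * all-zero checksum field to mean "no checksum", so the functions above
 * transmit a genuinely-zero result as 0xFFFF (CSUM_MANGLED_0); both
 * values verify identically in one's-complement arithmetic. A
 * userspace model of the fold-and-mangle step:
 */
#if 0	/* example only, never compiled */
#include <assert.h>
#include <stdint.h>

static uint16_t ex_csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* a pseudo-header + payload whose 32-bit sum is 0xffff */
	uint16_t check = ex_csum_fold(0xffff);

	assert(check == 0);	/* would be read as "no checksum" */
	if (check == 0)
		check = 0xffff;	/* CSUM_MANGLED_0 */
	assert(check == 0xffff);
	return 0;
}
#endif
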
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (is_udplite)				 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check_tx) {		 /* UDP csum disabled */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
			UDP_INC_STATS_USER(sock_net(sk),
					   UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS_USER(sock_net(sk),
				   UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);

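/*
 * Usage sketch, not part of the original file: the pending-frames path
 * above backs the UDP_CORK socket option. While corked, successive
 * send() payloads are appended to one pending datagram, and releasing
 * the cork reaches udp_push_pending_frames() to emit it. A minimal
 * userspace illustration on an already-connected socket:
 */
#if 0	/* example only, never compiled */
#include <netinet/in.h>
#include <netinet/udp.h>
#include <sys/socket.h>

static int send_as_one_datagram(int fd)
{
	int on = 1, off = 0;

	if (setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on)) < 0)
		return -1;
	send(fd, "header,", 7, 0);	/* queued, not yet transmitted */
	send(fd, "payload", 7, 0);	/* appended to the same datagram */
	/* uncorking pushes a single 14-byte UDP datagram */
	return setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
}
#endif
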
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8 tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 * Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;
	ipc.tx_flags = 0;
	ipc.ttl = 0;
	ipc.tos = -1;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 * Get and verify the address.
	 */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		   Route will not be used if at least one option is set.
		 */
		connected = 1;
	}
	ipc.addr = inet->inet_saddr;

	ipc.oif = sk->sk_bound_dev_if;

	sock_tx_timestamp(sk, &ipc.tx_flags);

	if (msg->msg_controllen) {
		err = ip_cmsg_send(sock_net(sk), msg, &ipc,
				   sk->sk_family == AF_INET6);
		if (err)
			return err;
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = get_rttos(&ipc, inet);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	} else if (!ipc.oif)
		ipc.oif = inet->uc_index;

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (!rt) {
		struct net *net = sock_net(sk);
		__u8 flow_flags = inet_sk_flowi_flags(sk);

		fl4 = &fl4_stack;

		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
				   flow_flags,
				   faddr, saddr, dport, inet->inet_sport);

		if (!saddr && ipc.oif) {
			err = l3mdev_get_saddr(net, ipc.oif, fl4);
			if (err < 0)
				goto out;
		}

		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			if (err == -ENETUNREACH)
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = fl4->saddr;
	if (!ipc.addr)
		daddr = ipc.addr = fl4->daddr;

	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  msg->msg_flags);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_send_skb(skb, fl4);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 * Now cork the socket to pend data.
	 */
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS_USER(sock_net(sk),
				   UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	dst_confirm(&rt->dst);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udp_sendmsg);

int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (!up->pending) {
		struct msghdr msg = { .msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 3\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
			     page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}

/**
 * first_packet_length - return length of first packet in receive queue
 * @sk: socket
 *
 * Drops all bad-checksum frames until a valid one is found.
 * Returns the length of the found skb, or 0 if none is found.
 */
static unsigned int first_packet_length(struct sock *sk)
{
	struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
	struct sk_buff *skb;
	unsigned int res;

	__skb_queue_head_init(&list_kill);

	spin_lock_bh(&rcvq->lock);
	while ((skb = skb_peek(rcvq)) != NULL &&
		udp_lib_checksum_complete(skb)) {
		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS,
				 IS_UDPLITE(sk));
		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
				 IS_UDPLITE(sk));
		atomic_inc(&sk->sk_drops);
		__skb_unlink(skb, rcvq);
		__skb_queue_tail(&list_kill, skb);
	}
	res = skb ? skb->len : 0;
	spin_unlock_bh(&rcvq->lock);

	if (!skb_queue_empty(&list_kill)) {
		bool slow = lock_sock_fast(sk);

		__skb_queue_purge(&list_kill);
		sk_mem_reclaim_partial(sk);
		unlock_sock_fast(sk, slow);
	}
	return res;
}

Linus Torvalds1da177e2005-04-16 15:20:36 -07001296/*
1297 * IOCTL requests applicable to the UDP protocol
1298 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001299
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
1301{
Stephen Hemminger6516c652007-03-08 20:41:55 -08001302 switch (cmd) {
1303 case SIOCOUTQ:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304 {
Eric Dumazet31e6d362009-06-17 19:05:41 -07001305 int amount = sk_wmem_alloc_get(sk);
1306
Stephen Hemminger6516c652007-03-08 20:41:55 -08001307 return put_user(amount, (int __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308 }
Stephen Hemminger6516c652007-03-08 20:41:55 -08001309
1310 case SIOCINQ:
1311 {
Eric Dumazet85584672009-10-09 04:43:40 +00001312 unsigned int amount = first_packet_length(sk);
Stephen Hemminger6516c652007-03-08 20:41:55 -08001313
Eric Dumazet85584672009-10-09 04:43:40 +00001314 if (amount)
Stephen Hemminger6516c652007-03-08 20:41:55 -08001315 /*
1316 * We will only return the amount
1317 * of this packet since that is all
1318 * that will be read.
1319 */
Eric Dumazet85584672009-10-09 04:43:40 +00001320 amount -= sizeof(struct udphdr);
1321
Stephen Hemminger6516c652007-03-08 20:41:55 -08001322 return put_user(amount, (int __user *)arg);
1323 }
1324
1325 default:
1326 return -ENOIOCTLCMD;
1327 }
1328
1329 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330}
Eric Dumazetc482c562009-07-17 00:26:32 +00001331EXPORT_SYMBOL(udp_ioctl);
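
/* Illustrative userspace sketch (a hedged example, not part of this file):
 * SIOCINQ reports the payload length of the next pending datagram, with
 * the UDP header already subtracted above; SIOCOUTQ reports bytes still
 * sitting in the send queue:
 *
 *	int pending, unsent;
 *	ioctl(fd, SIOCINQ, &pending);
 *	ioctl(fd, SIOCOUTQ, &unsent);
 */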
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332
David S. Millerdb8dac22008-03-06 16:22:02 -08001333/*
1334 * This should be easy: if there is something there we
1335 * return it, otherwise we block.
1336 */
1337
Ying Xue1b784142015-03-02 15:37:48 +08001338int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
1339 int flags, int *addr_len)
David S. Millerdb8dac22008-03-06 16:22:02 -08001340{
1341 struct inet_sock *inet = inet_sk(sk);
Steffen Hurrle342dfc32014-01-17 22:53:15 +01001342 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
David S. Millerdb8dac22008-03-06 16:22:02 -08001343 struct sk_buff *skb;
David S. Miller59c2cda2011-12-01 14:12:55 -05001344 unsigned int ulen, copied;
Pavel Emelyanov3f518bf2012-02-21 07:30:58 +00001345 int peeked, off = 0;
David S. Millerdb8dac22008-03-06 16:22:02 -08001346 int err;
1347 int is_udplite = IS_UDPLITE(sk);
Eric Dumazet197c9492015-12-30 08:51:12 -05001348 bool checksum_valid = false;
Eric Dumazet8a74ad62010-05-26 19:20:18 +00001349 bool slow;
David S. Millerdb8dac22008-03-06 16:22:02 -08001350
David S. Millerdb8dac22008-03-06 16:22:02 -08001351 if (flags & MSG_ERRQUEUE)
Hannes Frederic Sowa85fbaa72013-11-23 00:46:12 +01001352 return ip_recv_error(sk, msg, len, addr_len);
David S. Millerdb8dac22008-03-06 16:22:02 -08001353
1354try_again:
1355 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
Pavel Emelyanov3f518bf2012-02-21 07:30:58 +00001356 &peeked, &off, &err);
David S. Millerdb8dac22008-03-06 16:22:02 -08001357 if (!skb)
1358 goto out;
1359
1360 ulen = skb->len - sizeof(struct udphdr);
David S. Miller59c2cda2011-12-01 14:12:55 -05001361 copied = len;
1362 if (copied > ulen)
1363 copied = ulen;
1364 else if (copied < ulen)
David S. Millerdb8dac22008-03-06 16:22:02 -08001365 msg->msg_flags |= MSG_TRUNC;
1366
1367 /*
1368 * If checksum is needed at all, try to do it while copying the
1369 * data. If the data is truncated, or if we only want a partial
1370 * coverage checksum (UDP-Lite), do it before the copy.
1371 */
1372
David S. Miller59c2cda2011-12-01 14:12:55 -05001373 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
Eric Dumazet197c9492015-12-30 08:51:12 -05001374 checksum_valid = !udp_lib_checksum_complete(skb);
1375 if (!checksum_valid)
David S. Millerdb8dac22008-03-06 16:22:02 -08001376 goto csum_copy_err;
1377 }
1378
Eric Dumazet197c9492015-12-30 08:51:12 -05001379 if (checksum_valid || skb_csum_unnecessary(skb))
David S. Miller51f3d022014-11-05 16:46:40 -05001380 err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
1381 msg, copied);
David S. Millerdb8dac22008-03-06 16:22:02 -08001382 else {
Al Viro227158d2014-04-06 18:47:38 -04001383 err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr),
1384 msg);
David S. Millerdb8dac22008-03-06 16:22:02 -08001385
1386 if (err == -EINVAL)
1387 goto csum_copy_err;
1388 }
1389
Eric Dumazet22911fc2012-06-27 00:23:44 +00001390 if (unlikely(err)) {
1391 trace_kfree_skb(skb, udp_recvmsg);
Eric Dumazet979402b2012-09-05 23:34:44 +00001392 if (!peeked) {
1393 atomic_inc(&sk->sk_drops);
1394 UDP_INC_STATS_USER(sock_net(sk),
1395 UDP_MIB_INERRORS, is_udplite);
1396 }
David S. Millerdb8dac22008-03-06 16:22:02 -08001397 goto out_free;
Eric Dumazet22911fc2012-06-27 00:23:44 +00001398 }
David S. Millerdb8dac22008-03-06 16:22:02 -08001399
1400 if (!peeked)
Pavel Emelyanov629ca232008-07-05 21:18:07 -07001401 UDP_INC_STATS_USER(sock_net(sk),
1402 UDP_MIB_INDATAGRAMS, is_udplite);
David S. Millerdb8dac22008-03-06 16:22:02 -08001403
Neil Horman3b885782009-10-12 13:26:31 -07001404 sock_recv_ts_and_drops(msg, sk, skb);
David S. Millerdb8dac22008-03-06 16:22:02 -08001405
1406 /* Copy the address. */
Eric Dumazetc482c562009-07-17 00:26:32 +00001407 if (sin) {
David S. Millerdb8dac22008-03-06 16:22:02 -08001408 sin->sin_family = AF_INET;
1409 sin->sin_port = udp_hdr(skb)->source;
1410 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
1411 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
Hannes Frederic Sowabceaa902013-11-18 04:20:45 +01001412 *addr_len = sizeof(*sin);
David S. Millerdb8dac22008-03-06 16:22:02 -08001413 }
1414 if (inet->cmsg_flags)
Tom Herbertad6f9392015-01-05 13:56:17 -08001415 ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr));
David S. Millerdb8dac22008-03-06 16:22:02 -08001416
David S. Miller59c2cda2011-12-01 14:12:55 -05001417 err = copied;
David S. Millerdb8dac22008-03-06 16:22:02 -08001418 if (flags & MSG_TRUNC)
1419 err = ulen;
1420
1421out_free:
Eric Dumazet9d410c72009-10-30 05:03:53 +00001422 skb_free_datagram_locked(sk, skb);
David S. Millerdb8dac22008-03-06 16:22:02 -08001423out:
1424 return err;
1425
1426csum_copy_err:
Eric Dumazet8a74ad62010-05-26 19:20:18 +00001427 slow = lock_sock_fast(sk);
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001428 if (!skb_kill_datagram(sk, skb, flags)) {
1429 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
Pavel Emelyanov629ca232008-07-05 21:18:07 -07001430 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001431 }
Eric Dumazet8a74ad62010-05-26 19:20:18 +00001432 unlock_sock_fast(sk, slow);
David S. Millerdb8dac22008-03-06 16:22:02 -08001433
Eric Dumazetbeb39db2015-05-30 09:16:53 -07001434 /* starting over for a new packet, but check if we need to yield */
1435 cond_resched();
Xufeng Zhang9cfaa8d2011-06-21 10:43:40 +00001436 msg->msg_flags &= ~MSG_TRUNC;
David S. Millerdb8dac22008-03-06 16:22:02 -08001437 goto try_again;
1438}
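
/* Illustrative userspace sketch (hypothetical buffer, not part of this
 * file): since udp_recvmsg() returns the full datagram length when
 * MSG_TRUNC is passed, a caller can learn the real size without consuming
 * the datagram and then read it for real:
 *
 *	ssize_t real_len = recv(fd, buf, sizeof(buf), MSG_PEEK | MSG_TRUNC);
 *
 * real_len may exceed sizeof(buf); resize the buffer and recv() again.
 */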
1439
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440int udp_disconnect(struct sock *sk, int flags)
1441{
1442 struct inet_sock *inet = inet_sk(sk);
1443 /*
1444 * 1003.1g - break association.
1445 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001446
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 sk->sk_state = TCP_CLOSE;
Eric Dumazetc720c7e2009-10-15 06:30:45 +00001448 inet->inet_daddr = 0;
1449 inet->inet_dport = 0;
Tom Herbertbdeab992011-08-14 19:45:55 +00001450 sock_rps_reset_rxhash(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 sk->sk_bound_dev_if = 0;
1452 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1453 inet_reset_saddr(sk);
1454
1455 if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
1456 sk->sk_prot->unhash(sk);
Eric Dumazetc720c7e2009-10-15 06:30:45 +00001457 inet->inet_sport = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 }
1459 sk_dst_reset(sk);
1460 return 0;
1461}
Eric Dumazetc482c562009-07-17 00:26:32 +00001462EXPORT_SYMBOL(udp_disconnect);
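
/* Illustrative userspace sketch (not part of this file): udp_disconnect()
 * is reached by connect()ing the socket to an AF_UNSPEC address, the
 * POSIX idiom for breaking a datagram association:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(fd, &sa, sizeof(sa));
 */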
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463
Eric Dumazet645ca702008-10-29 01:41:45 -07001464void udp_lib_unhash(struct sock *sk)
1465{
Eric Dumazet723b4612008-11-25 13:55:15 -08001466 if (sk_hashed(sk)) {
1467 struct udp_table *udptable = sk->sk_prot->h.udp_table;
Eric Dumazet512615b2009-11-08 10:17:58 +00001468 struct udp_hslot *hslot, *hslot2;
1469
1470 hslot = udp_hashslot(udptable, sock_net(sk),
1471 udp_sk(sk)->udp_port_hash);
1472 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
Eric Dumazet645ca702008-10-29 01:41:45 -07001473
Eric Dumazet723b4612008-11-25 13:55:15 -08001474 spin_lock_bh(&hslot->lock);
Craig Galleke32ea7e2016-01-04 17:41:46 -05001475 if (rcu_access_pointer(sk->sk_reuseport_cb))
1476 reuseport_detach_sock(sk);
Eric Dumazet723b4612008-11-25 13:55:15 -08001477 if (sk_nulls_del_node_init_rcu(sk)) {
Eric Dumazetfdcc8aa92009-11-08 10:17:05 +00001478 hslot->count--;
Eric Dumazetc720c7e2009-10-15 06:30:45 +00001479 inet_sk(sk)->inet_num = 0;
Eric Dumazet723b4612008-11-25 13:55:15 -08001480 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
Eric Dumazet512615b2009-11-08 10:17:58 +00001481
1482 spin_lock(&hslot2->lock);
1483 hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
1484 hslot2->count--;
1485 spin_unlock(&hslot2->lock);
Eric Dumazet723b4612008-11-25 13:55:15 -08001486 }
1487 spin_unlock_bh(&hslot->lock);
Eric Dumazet645ca702008-10-29 01:41:45 -07001488 }
Eric Dumazet645ca702008-10-29 01:41:45 -07001489}
1490EXPORT_SYMBOL(udp_lib_unhash);
1491
Eric Dumazet719f8352010-09-08 05:08:44 +00001492/*
1493 * inet_rcv_saddr was changed; we must rehash the secondary hash
1494 */
1495void udp_lib_rehash(struct sock *sk, u16 newhash)
1496{
1497 if (sk_hashed(sk)) {
1498 struct udp_table *udptable = sk->sk_prot->h.udp_table;
1499 struct udp_hslot *hslot, *hslot2, *nhslot2;
1500
1501 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
1502 nhslot2 = udp_hashslot2(udptable, newhash);
1503 udp_sk(sk)->udp_portaddr_hash = newhash;
Craig Galleke32ea7e2016-01-04 17:41:46 -05001504
1505 if (hslot2 != nhslot2 ||
1506 rcu_access_pointer(sk->sk_reuseport_cb)) {
Eric Dumazet719f8352010-09-08 05:08:44 +00001507 hslot = udp_hashslot(udptable, sock_net(sk),
1508 udp_sk(sk)->udp_port_hash);
1509 /* we must lock primary chain too */
1510 spin_lock_bh(&hslot->lock);
Craig Galleke32ea7e2016-01-04 17:41:46 -05001511 if (rcu_access_pointer(sk->sk_reuseport_cb))
1512 reuseport_detach_sock(sk);
Eric Dumazet719f8352010-09-08 05:08:44 +00001513
Craig Galleke32ea7e2016-01-04 17:41:46 -05001514 if (hslot2 != nhslot2) {
1515 spin_lock(&hslot2->lock);
1516 hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
1517 hslot2->count--;
1518 spin_unlock(&hslot2->lock);
Eric Dumazet719f8352010-09-08 05:08:44 +00001519
Craig Galleke32ea7e2016-01-04 17:41:46 -05001520 spin_lock(&nhslot2->lock);
1521 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
1522 &nhslot2->head);
1523 nhslot2->count++;
1524 spin_unlock(&nhslot2->lock);
1525 }
Eric Dumazet719f8352010-09-08 05:08:44 +00001526
1527 spin_unlock_bh(&hslot->lock);
1528 }
1529 }
1530}
1531EXPORT_SYMBOL(udp_lib_rehash);
1532
1533static void udp_v4_rehash(struct sock *sk)
1534{
1535 u16 new_hash = udp4_portaddr_hash(sock_net(sk),
1536 inet_sk(sk)->inet_rcv_saddr,
1537 inet_sk(sk)->inet_num);
1538 udp_lib_rehash(sk, new_hash);
1539}
1540
Herbert Xu93821772008-09-15 11:48:46 -07001541static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1542{
Tom Herbertfec5e652010-04-16 16:01:27 -07001543 int rc;
Herbert Xu93821772008-09-15 11:48:46 -07001544
Shawn Bohrer005ec972013-10-07 11:01:38 -05001545 if (inet_sk(sk)->inet_daddr) {
Tom Herbertbdeab992011-08-14 19:45:55 +00001546 sock_rps_save_rxhash(sk, skb);
Shawn Bohrer005ec972013-10-07 11:01:38 -05001547 sk_mark_napi_id(sk, skb);
Eric Dumazet2c8c56e2014-11-11 05:54:28 -08001548 sk_incoming_cpu_update(sk);
Shawn Bohrer005ec972013-10-07 11:01:38 -05001549 }
Tom Herbertfec5e652010-04-16 16:01:27 -07001550
Eric Dumazetd826eb12011-11-09 07:24:35 +00001551 rc = sock_queue_rcv_skb(sk, skb);
Eric Dumazet766e90372009-10-14 20:40:11 -07001552 if (rc < 0) {
1553 int is_udplite = IS_UDPLITE(sk);
1554
Herbert Xu93821772008-09-15 11:48:46 -07001555 /* Note that an ENOMEM error is charged twice */
Eric Dumazet766e90372009-10-14 20:40:11 -07001556 if (rc == -ENOMEM)
Herbert Xu93821772008-09-15 11:48:46 -07001557 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
1558 is_udplite);
Eric Dumazet766e90372009-10-14 20:40:11 -07001559 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
1560 kfree_skb(skb);
Satoru Moriya296f7ea2011-06-17 11:58:39 +00001561 trace_udp_fail_queue_rcv_skb(rc, sk);
Eric Dumazet766e90372009-10-14 20:40:11 -07001562 return -1;
Herbert Xu93821772008-09-15 11:48:46 -07001563 }
1564
1565 return 0;
1566
Herbert Xu93821772008-09-15 11:48:46 -07001567}
1568
Eric Dumazet447167b2012-04-11 23:05:28 +00001569static struct static_key udp_encap_needed __read_mostly;
1570void udp_encap_enable(void)
1571{
1572 if (!static_key_enabled(&udp_encap_needed))
1573 static_key_slow_inc(&udp_encap_needed);
1574}
1575EXPORT_SYMBOL(udp_encap_enable);
1576
David S. Millerdb8dac22008-03-06 16:22:02 -08001577/* returns:
1578 * -1: error
1579 * 0: success
1580 * >0: "udp encap" protocol resubmission
1581 *
1582 * Note that in the success and error cases, the skb is assumed to
1583 * have either been requeued or freed.
1584 */
Eric Dumazetc482c562009-07-17 00:26:32 +00001585int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
David S. Millerdb8dac22008-03-06 16:22:02 -08001586{
1587 struct udp_sock *up = udp_sk(sk);
1588 int rc;
1589 int is_udplite = IS_UDPLITE(sk);
1590
1591 /*
1592 * Charge it to the socket, dropping if the queue is full.
1593 */
1594 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1595 goto drop;
1596 nf_reset(skb);
1597
Eric Dumazet447167b2012-04-11 23:05:28 +00001598 if (static_key_false(&udp_encap_needed) && up->encap_type) {
Eric Dumazet0ad92ad2011-11-01 12:56:59 +00001599 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
1600
David S. Millerdb8dac22008-03-06 16:22:02 -08001601 /*
1602 * This is an encapsulation socket so pass the skb to
1603 * the socket's udp_encap_rcv() hook. Otherwise, just
1604 * fall through and pass this up the UDP socket.
1605 * up->encap_rcv() returns the following value:
1606 * =0 if skb was successfully passed to the encap
1607 * handler or was discarded by it.
1608 * >0 if skb should be passed on to UDP.
1609 * <0 if skb should be resubmitted as proto -N
1610 */
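		/* A minimal sketch of a handler honouring that contract
		 * (looks_like_my_tunnel()/my_tunnel_rcv() are hypothetical,
		 * shown only to illustrate the return-value convention):
		 *
		 *	static int my_encap_rcv(struct sock *sk, struct sk_buff *skb)
		 *	{
		 *		if (!looks_like_my_tunnel(skb))
		 *			return 1;	(>0: not ours, let UDP take it)
		 *		if (my_tunnel_rcv(sk, skb))
		 *			kfree_skb(skb);
		 *		return 0;	(=0: skb consumed or discarded)
		 *	}
		 */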
1611
1612 /* if we're overly short, let UDP handle it */
Eric Dumazet0ad92ad2011-11-01 12:56:59 +00001613 encap_rcv = ACCESS_ONCE(up->encap_rcv);
Ian Morris00db4122015-04-03 09:17:27 +01001614 if (skb->len > sizeof(struct udphdr) && encap_rcv) {
David S. Millerdb8dac22008-03-06 16:22:02 -08001615 int ret;
1616
Tom Herbert0a809662014-05-07 16:52:39 -07001617 /* Verify checksum before giving to encap */
1618 if (udp_lib_checksum_complete(skb))
1619 goto csum_error;
1620
Eric Dumazet0ad92ad2011-11-01 12:56:59 +00001621 ret = encap_rcv(sk, skb);
David S. Millerdb8dac22008-03-06 16:22:02 -08001622 if (ret <= 0) {
Pavel Emelyanov02833282008-07-05 21:18:48 -07001623 UDP_INC_STATS_BH(sock_net(sk),
1624 UDP_MIB_INDATAGRAMS,
David S. Millerdb8dac22008-03-06 16:22:02 -08001625 is_udplite);
1626 return -ret;
1627 }
1628 }
1629
1630 /* FALLTHROUGH -- it's a UDP packet */
1631 }
1632
1633 /*
1634 * UDP-Lite specific tests, ignored on UDP sockets
1635 */
1636 if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
1637
1638 /*
1639 * MIB statistics other than incrementing the error count are
1640 * disabled for the following two types of errors: these depend
1641 * on the application settings, not on the functioning of the
1642 * protocol stack as such.
1643 *
1644 * RFC 3828 here recommends (sec 3.3): "There should also be a
1645 * way ... to ... at least let the receiving application block
1646 * delivery of packets with coverage values less than a value
1647 * provided by the application."
1648 */
1649 if (up->pcrlen == 0) { /* full coverage was set */
Joe Perchesba7a46f2014-11-11 10:59:17 -08001650 net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
1651 UDP_SKB_CB(skb)->cscov, skb->len);
David S. Millerdb8dac22008-03-06 16:22:02 -08001652 goto drop;
1653 }
1654 /* The next case involves violating the min. coverage requested
1655 * by the receiver. This is subtle: if the receiver wants x and x is
1656 * greater than the buffer size/MTU, then the receiver will complain
1657 * that it wants x while the sender emits packets of smaller size y.
1658 * Therefore the above ...()->partial_cov statement is essential.
1659 */
1660 if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
Joe Perchesba7a46f2014-11-11 10:59:17 -08001661 net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
1662 UDP_SKB_CB(skb)->cscov, up->pcrlen);
David S. Millerdb8dac22008-03-06 16:22:02 -08001663 goto drop;
1664 }
1665 }
1666
Eric Dumazet33d480c2011-08-11 19:30:52 +00001667 if (rcu_access_pointer(sk->sk_filter) &&
1668 udp_lib_checksum_complete(skb))
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001669 goto csum_error;
David S. Millerdb8dac22008-03-06 16:22:02 -08001670
Sorin Dumitru274f4822014-07-22 21:16:51 +03001671 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
James M Leddy3e215c82014-06-25 17:38:13 -04001672 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
1673 is_udplite);
Eric Dumazetc3774112010-04-27 15:13:20 -07001674 goto drop;
James M Leddy3e215c82014-06-25 17:38:13 -04001675 }
Eric Dumazetc3774112010-04-27 15:13:20 -07001676
Herbert Xu93821772008-09-15 11:48:46 -07001677 rc = 0;
David S. Millerdb8dac22008-03-06 16:22:02 -08001678
Shawn Bohrerfbf88662013-10-07 11:01:40 -05001679 ipv4_pktinfo_prepare(sk, skb);
Herbert Xu93821772008-09-15 11:48:46 -07001680 bh_lock_sock(sk);
1681 if (!sock_owned_by_user(sk))
1682 rc = __udp_queue_rcv_skb(sk, skb);
Eric Dumazetf545a382012-04-22 23:34:26 +00001683 else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
Zhu Yi55349792010-03-04 18:01:42 +00001684 bh_unlock_sock(sk);
1685 goto drop;
1686 }
Herbert Xu93821772008-09-15 11:48:46 -07001687 bh_unlock_sock(sk);
1688
1689 return rc;
David S. Millerdb8dac22008-03-06 16:22:02 -08001690
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001691csum_error:
1692 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
David S. Millerdb8dac22008-03-06 16:22:02 -08001693drop:
Pavel Emelyanov02833282008-07-05 21:18:48 -07001694 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
Eric Dumazet8edf19c2009-10-15 00:12:40 +00001695 atomic_inc(&sk->sk_drops);
David S. Millerdb8dac22008-03-06 16:22:02 -08001696 kfree_skb(skb);
1697 return -1;
1698}
1699
Eric Dumazet1240d132009-11-08 10:18:44 +00001700static void flush_stack(struct sock **stack, unsigned int count,
1701 struct sk_buff *skb, unsigned int final)
1702{
1703 unsigned int i;
1704 struct sk_buff *skb1 = NULL;
Eric Dumazetf6b8f322009-11-08 10:20:19 +00001705 struct sock *sk;
Eric Dumazet1240d132009-11-08 10:18:44 +00001706
1707 for (i = 0; i < count; i++) {
Eric Dumazetf6b8f322009-11-08 10:20:19 +00001708 sk = stack[i];
Ian Morris51456b22015-04-03 09:17:26 +01001709 if (likely(!skb1))
Eric Dumazet1240d132009-11-08 10:18:44 +00001710 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
1711
Eric Dumazetf6b8f322009-11-08 10:20:19 +00001712 if (!skb1) {
1713 atomic_inc(&sk->sk_drops);
1714 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
1715 IS_UDPLITE(sk));
1716 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
1717 IS_UDPLITE(sk));
1718 }
1719
1720 if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
Eric Dumazet1240d132009-11-08 10:18:44 +00001721 skb1 = NULL;
David Held2dc41cf2014-07-15 23:28:32 -04001722
1723 sock_put(sk);
Eric Dumazet1240d132009-11-08 10:18:44 +00001724 }
1725 if (unlikely(skb1))
1726 kfree_skb(skb1);
1727}
1728
Eric Dumazet97502232013-12-11 14:46:51 -08001729/* For TCP sockets, sk_rx_dst is protected by socket lock
Eric Dumazete47eb5d2013-12-15 10:53:46 -08001730 * For UDP, we use xchg() to guard against concurrent changes.
Eric Dumazet97502232013-12-11 14:46:51 -08001731 */
1732static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
Shawn Bohrer421b3882013-10-07 11:01:39 -05001733{
Eric Dumazet97502232013-12-11 14:46:51 -08001734 struct dst_entry *old;
Shawn Bohrer421b3882013-10-07 11:01:39 -05001735
Eric Dumazete47eb5d2013-12-15 10:53:46 -08001736 dst_hold(dst);
1737 old = xchg(&sk->sk_rx_dst, dst);
1738 dst_release(old);
Shawn Bohrer421b3882013-10-07 11:01:39 -05001739}
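
/* Why xchg(): a minimal sketch of the race it closes (two CPUs assumed to
 * call udp_sk_rx_dst_set() concurrently). With a plain read-then-write,
 *
 *	old = sk->sk_rx_dst;
 *	sk->sk_rx_dst = dst;
 *	dst_release(old);
 *
 * both CPUs can read the same old pointer and release it twice while one
 * of the new dsts leaks; the atomic swap hands each stored pointer to
 * exactly one caller for release.
 */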
1740
David S. Millerdb8dac22008-03-06 16:22:02 -08001741/*
1742 * Multicasts and broadcasts go to each listener.
1743 *
Eric Dumazet1240d132009-11-08 10:18:44 +00001744 * Note: called only from the BH handler context.
David S. Millerdb8dac22008-03-06 16:22:02 -08001745 */
Pavel Emelyanove3163492008-06-16 17:12:11 -07001746static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
David S. Millerdb8dac22008-03-06 16:22:02 -08001747 struct udphdr *uh,
1748 __be32 saddr, __be32 daddr,
Rick Jones36cbb242014-11-06 10:37:54 -08001749 struct udp_table *udptable,
1750 int proto)
David S. Millerdb8dac22008-03-06 16:22:02 -08001751{
Eric Dumazet1240d132009-11-08 10:18:44 +00001752 struct sock *sk, *stack[256 / sizeof(struct sock *)];
David Held5cf3d462014-07-15 23:28:31 -04001753 struct hlist_nulls_node *node;
1754 unsigned short hnum = ntohs(uh->dest);
1755 struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
1756 int dif = skb->dev->ifindex;
David Held2dc41cf2014-07-15 23:28:32 -04001757 unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
1758 unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
Rick Jones36cbb242014-11-06 10:37:54 -08001759 bool inner_flushed = false;
David Held2dc41cf2014-07-15 23:28:32 -04001760
1761 if (use_hash2) {
1762 hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
1763 udp_table.mask;
1764 hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask;
1765start_lookup:
1766 hslot = &udp_table.hash2[hash2];
1767 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
1768 }
David S. Millerdb8dac22008-03-06 16:22:02 -08001769
Eric Dumazet645ca702008-10-29 01:41:45 -07001770 spin_lock(&hslot->lock);
David Held2dc41cf2014-07-15 23:28:32 -04001771 sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) {
David Held5cf3d462014-07-15 23:28:31 -04001772 if (__udp_is_mcast_sock(net, sk,
1773 uh->dest, daddr,
1774 uh->source, saddr,
1775 dif, hnum)) {
1776 if (unlikely(count == ARRAY_SIZE(stack))) {
1777 flush_stack(stack, count, skb, ~0);
Rick Jones36cbb242014-11-06 10:37:54 -08001778 inner_flushed = true;
David Held5cf3d462014-07-15 23:28:31 -04001779 count = 0;
1780 }
1781 stack[count++] = sk;
David Held2dc41cf2014-07-15 23:28:32 -04001782 sock_hold(sk);
Eric Dumazet1240d132009-11-08 10:18:44 +00001783 }
1784 }
David S. Millerdb8dac22008-03-06 16:22:02 -08001785
Eric Dumazet645ca702008-10-29 01:41:45 -07001786 spin_unlock(&hslot->lock);
Eric Dumazet1240d132009-11-08 10:18:44 +00001787
David Held2dc41cf2014-07-15 23:28:32 -04001788 /* Also look up *:port if we are using hash2 and haven't done so yet. */
1789 if (use_hash2 && hash2 != hash2_any) {
1790 hash2 = hash2_any;
1791 goto start_lookup;
1792 }
1793
Eric Dumazet1240d132009-11-08 10:18:44 +00001794 /*
1795 * do the slow work with no lock held
1796 */
1797 if (count) {
1798 flush_stack(stack, count, skb, count - 1);
Eric Dumazet1240d132009-11-08 10:18:44 +00001799 } else {
Rick Jones36cbb242014-11-06 10:37:54 -08001800 if (!inner_flushed)
1801 UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
1802 proto == IPPROTO_UDPLITE);
1803 consume_skb(skb);
Eric Dumazet1240d132009-11-08 10:18:44 +00001804 }
David S. Millerdb8dac22008-03-06 16:22:02 -08001805 return 0;
1806}
1807
1808/* Initialize UDP checksum. If exited with zero value (success),
1809 * CHECKSUM_UNNECESSARY means that no more checks are required.
1810 * Otherwise, csum completion requires checksumming the packet body,
1811 * including the udp header, and folding it into skb->csum.
1812 */
1813static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
1814 int proto)
1815{
David S. Millerdb8dac22008-03-06 16:22:02 -08001816 int err;
1817
1818 UDP_SKB_CB(skb)->partial_cov = 0;
1819 UDP_SKB_CB(skb)->cscov = skb->len;
1820
1821 if (proto == IPPROTO_UDPLITE) {
1822 err = udplite_checksum_init(skb, uh);
1823 if (err)
1824 return err;
1825 }
1826
Tom Herberted70fcf2014-05-02 16:29:38 -07001827 return skb_checksum_init_zero_check(skb, proto, uh->check,
1828 inet_compute_pseudo);
David S. Millerdb8dac22008-03-06 16:22:02 -08001829}
1830
1831/*
1832 * All we need to do is get the socket, and then do a checksum.
1833 */
1834
Eric Dumazet645ca702008-10-29 01:41:45 -07001835int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
David S. Millerdb8dac22008-03-06 16:22:02 -08001836 int proto)
1837{
1838 struct sock *sk;
Jesper Dangaard Brouer7b5e56f2009-02-05 15:05:45 -08001839 struct udphdr *uh;
David S. Millerdb8dac22008-03-06 16:22:02 -08001840 unsigned short ulen;
Eric Dumazetadf30902009-06-02 05:19:30 +00001841 struct rtable *rt = skb_rtable(skb);
Jesper Dangaard Brouer2783ef22009-02-06 01:59:12 -08001842 __be32 saddr, daddr;
Pavel Emelyanov02833282008-07-05 21:18:48 -07001843 struct net *net = dev_net(skb->dev);
David S. Millerdb8dac22008-03-06 16:22:02 -08001844
1845 /*
1846 * Validate the packet.
1847 */
1848 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
1849 goto drop; /* No space for header. */
1850
Jesper Dangaard Brouer7b5e56f2009-02-05 15:05:45 -08001851 uh = udp_hdr(skb);
David S. Millerdb8dac22008-03-06 16:22:02 -08001852 ulen = ntohs(uh->len);
Bjørn Morkccc2d972010-05-06 03:44:34 +00001853 saddr = ip_hdr(skb)->saddr;
1854 daddr = ip_hdr(skb)->daddr;
1855
David S. Millerdb8dac22008-03-06 16:22:02 -08001856 if (ulen > skb->len)
1857 goto short_packet;
1858
1859 if (proto == IPPROTO_UDP) {
1860 /* UDP validates ulen. */
1861 if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
1862 goto short_packet;
1863 uh = udp_hdr(skb);
1864 }
1865
1866 if (udp4_csum_init(skb, uh, proto))
1867 goto csum_error;
1868
Eric Dumazet8afdd992013-12-10 18:07:23 -08001869 sk = skb_steal_sock(skb);
1870 if (sk) {
Eric Dumazet97502232013-12-11 14:46:51 -08001871 struct dst_entry *dst = skb_dst(skb);
Shawn Bohrer421b3882013-10-07 11:01:39 -05001872 int ret;
David S. Millerdb8dac22008-03-06 16:22:02 -08001873
Eric Dumazet97502232013-12-11 14:46:51 -08001874 if (unlikely(sk->sk_rx_dst != dst))
1875 udp_sk_rx_dst_set(sk, dst);
Shawn Bohrer421b3882013-10-07 11:01:39 -05001876
1877 ret = udp_queue_rcv_skb(sk, skb);
Eric Dumazet8afdd992013-12-10 18:07:23 -08001878 sock_put(sk);
Shawn Bohrer421b3882013-10-07 11:01:39 -05001879 /* a return value > 0 means to resubmit the input, but
1880 * it wants the return to be -protocol, or 0
1881 */
1882 if (ret > 0)
1883 return -ret;
1884 return 0;
Shawn Bohrer421b3882013-10-07 11:01:39 -05001885 }
David S. Millerdb8dac22008-03-06 16:22:02 -08001886
Fabian Frederickc18450a2014-11-04 20:48:41 +01001887 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
1888 return __udp4_lib_mcast_deliver(net, skb, uh,
Rick Jones36cbb242014-11-06 10:37:54 -08001889 saddr, daddr, udptable, proto);
Fabian Frederickc18450a2014-11-04 20:48:41 +01001890
1891 sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
Ian Morris00db4122015-04-03 09:17:27 +01001892 if (sk) {
Eliezer Tamira5b50472013-06-10 11:40:00 +03001893 int ret;
1894
Tom Herbert224d0192015-01-05 13:56:14 -08001895 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
Tom Herbert2abb7cd2014-08-31 15:12:43 -07001896 skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
1897 inet_compute_pseudo);
1898
Eliezer Tamira5b50472013-06-10 11:40:00 +03001899 ret = udp_queue_rcv_skb(sk, skb);
David S. Millerdb8dac22008-03-06 16:22:02 -08001900 sock_put(sk);
1901
1902 /* a return value > 0 means to resubmit the input, but
1903 * it wants the return to be -protocol, or 0
1904 */
1905 if (ret > 0)
1906 return -ret;
1907 return 0;
1908 }
1909
1910 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1911 goto drop;
1912 nf_reset(skb);
1913
1914 /* No socket. Drop the packet silently if the checksum is wrong */
1915 if (udp_lib_checksum_complete(skb))
1916 goto csum_error;
1917
Pavel Emelyanov02833282008-07-05 21:18:48 -07001918 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
David S. Millerdb8dac22008-03-06 16:22:02 -08001919 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
1920
1921 /*
1922 * Hmm. We got a UDP packet to a port to which we
1923 * don't wanna listen. Ignore it.
1924 */
1925 kfree_skb(skb);
1926 return 0;
1927
1928short_packet:
Joe Perchesba7a46f2014-11-11 10:59:17 -08001929 net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
1930 proto == IPPROTO_UDPLITE ? "Lite" : "",
1931 &saddr, ntohs(uh->source),
1932 ulen, skb->len,
1933 &daddr, ntohs(uh->dest));
David S. Millerdb8dac22008-03-06 16:22:02 -08001934 goto drop;
1935
1936csum_error:
1937 /*
1938 * RFC1122: OK. Discards the bad packet silently (as far as
1939 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
1940 */
Joe Perchesba7a46f2014-11-11 10:59:17 -08001941 net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
1942 proto == IPPROTO_UDPLITE ? "Lite" : "",
1943 &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
1944 ulen);
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001945 UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
David S. Millerdb8dac22008-03-06 16:22:02 -08001946drop:
Pavel Emelyanov02833282008-07-05 21:18:48 -07001947 UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
David S. Millerdb8dac22008-03-06 16:22:02 -08001948 kfree_skb(skb);
1949 return 0;
1950}
1951
Shawn Bohrer421b3882013-10-07 11:01:39 -05001952/* We can only early demux multicast if there is a single matching socket.
1953 * If more than one socket is found, return NULL.
1954 */
1955static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
1956 __be16 loc_port, __be32 loc_addr,
1957 __be16 rmt_port, __be32 rmt_addr,
1958 int dif)
1959{
1960 struct sock *sk, *result;
1961 struct hlist_nulls_node *node;
1962 unsigned short hnum = ntohs(loc_port);
1963 unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask);
1964 struct udp_hslot *hslot = &udp_table.hash[slot];
1965
Eric Dumazet63c6f812014-06-12 16:13:06 -07001966 /* Do not bother scanning an overly long list */
1967 if (hslot->count > 10)
1968 return NULL;
1969
Shawn Bohrer421b3882013-10-07 11:01:39 -05001970 rcu_read_lock();
1971begin:
1972 count = 0;
1973 result = NULL;
1974 sk_nulls_for_each_rcu(sk, node, &hslot->head) {
1975 if (__udp_is_mcast_sock(net, sk,
1976 loc_port, loc_addr,
1977 rmt_port, rmt_addr,
1978 dif, hnum)) {
1979 result = sk;
1980 ++count;
1981 }
1982 }
1983 /*
1984 * if the nulls value we got at the end of this lookup is
1985 * not the expected one, we must restart the lookup.
1986 * We probably hit an item that was moved to another chain.
1987 */
1988 if (get_nulls_value(node) != slot)
1989 goto begin;
1990
1991 if (result) {
1992 if (count != 1 ||
1993 unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
1994 result = NULL;
Eric Dumazetf69b9232013-10-08 21:47:29 -07001995 else if (unlikely(!__udp_is_mcast_sock(net, result,
Shawn Bohrer421b3882013-10-07 11:01:39 -05001996 loc_port, loc_addr,
1997 rmt_port, rmt_addr,
1998 dif, hnum))) {
1999 sock_put(result);
2000 result = NULL;
2001 }
2002 }
2003 rcu_read_unlock();
2004 return result;
2005}
2006
2007/* For unicast we should only early demux connected sockets or we can
2008 * break forwarding setups. The chains here can be long, so only check
2009 * whether the first socket is an exact match and, if not, move on.
2010 */
2011static struct sock *__udp4_lib_demux_lookup(struct net *net,
2012 __be16 loc_port, __be32 loc_addr,
2013 __be16 rmt_port, __be32 rmt_addr,
2014 int dif)
2015{
2016 struct sock *sk, *result;
2017 struct hlist_nulls_node *node;
2018 unsigned short hnum = ntohs(loc_port);
2019 unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
2020 unsigned int slot2 = hash2 & udp_table.mask;
2021 struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
Joe Perchesc7228312014-05-13 20:30:07 -07002022 INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
Shawn Bohrer421b3882013-10-07 11:01:39 -05002023 const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
2024
2025 rcu_read_lock();
2026 result = NULL;
2027 udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
2028 if (INET_MATCH(sk, net, acookie,
2029 rmt_addr, loc_addr, ports, dif))
2030 result = sk;
2031 /* Only check first socket in chain */
2032 break;
2033 }
2034
2035 if (result) {
2036 if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
2037 result = NULL;
2038 else if (unlikely(!INET_MATCH(sk, net, acookie,
2039 rmt_addr, loc_addr,
2040 ports, dif))) {
2041 sock_put(result);
2042 result = NULL;
2043 }
2044 }
2045 rcu_read_unlock();
2046 return result;
2047}
2048
2049void udp_v4_early_demux(struct sk_buff *skb)
2050{
Eric Dumazet610438b2013-12-11 08:10:05 -08002051 struct net *net = dev_net(skb->dev);
2052 const struct iphdr *iph;
2053 const struct udphdr *uh;
Shawn Bohrer421b3882013-10-07 11:01:39 -05002054 struct sock *sk;
2055 struct dst_entry *dst;
Shawn Bohrer421b3882013-10-07 11:01:39 -05002056 int dif = skb->dev->ifindex;
Shawn Bohrer6e540302015-06-03 16:27:38 -05002057 int ours;
Shawn Bohrer421b3882013-10-07 11:01:39 -05002058
2059 /* validate the packet */
2060 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
2061 return;
2062
Eric Dumazet610438b2013-12-11 08:10:05 -08002063 iph = ip_hdr(skb);
2064 uh = udp_hdr(skb);
2065
Shawn Bohrer421b3882013-10-07 11:01:39 -05002066 if (skb->pkt_type == PACKET_BROADCAST ||
Shawn Bohrer6e540302015-06-03 16:27:38 -05002067 skb->pkt_type == PACKET_MULTICAST) {
2068 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
2069
2070 if (!in_dev)
2071 return;
2072
2073 ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
2074 iph->protocol);
2075 if (!ours)
2076 return;
Shawn Bohrer421b3882013-10-07 11:01:39 -05002077 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
2078 uh->source, iph->saddr, dif);
Shawn Bohrer6e540302015-06-03 16:27:38 -05002079 } else if (skb->pkt_type == PACKET_HOST) {
Shawn Bohrer421b3882013-10-07 11:01:39 -05002080 sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
2081 uh->source, iph->saddr, dif);
Shawn Bohrer6e540302015-06-03 16:27:38 -05002082 } else {
Shawn Bohrer421b3882013-10-07 11:01:39 -05002083 return;
Shawn Bohrer6e540302015-06-03 16:27:38 -05002084 }
Shawn Bohrer421b3882013-10-07 11:01:39 -05002085
2086 if (!sk)
2087 return;
2088
2089 skb->sk = sk;
Alexander Duyck82eabd92014-09-04 13:32:11 -04002090 skb->destructor = sock_efree;
Eric Dumazet10e2eb82015-08-01 12:14:33 +02002091 dst = READ_ONCE(sk->sk_rx_dst);
Shawn Bohrer421b3882013-10-07 11:01:39 -05002092
2093 if (dst)
2094 dst = dst_check(dst, 0);
Eric Dumazet10e2eb82015-08-01 12:14:33 +02002095 if (dst) {
2096 /* DST_NOCACHE cannot be used without taking a reference */
2097 if (dst->flags & DST_NOCACHE) {
2098 if (likely(atomic_inc_not_zero(&dst->__refcnt)))
2099 skb_dst_set(skb, dst);
2100 } else {
2101 skb_dst_set_noref(skb, dst);
2102 }
2103 }
Shawn Bohrer421b3882013-10-07 11:01:39 -05002104}
2105
David S. Millerdb8dac22008-03-06 16:22:02 -08002106int udp_rcv(struct sk_buff *skb)
2107{
Eric Dumazet645ca702008-10-29 01:41:45 -07002108 return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
David S. Millerdb8dac22008-03-06 16:22:02 -08002109}
2110
Brian Haley7d06b2e2008-06-14 17:04:49 -07002111void udp_destroy_sock(struct sock *sk)
David S. Millerdb8dac22008-03-06 16:22:02 -08002112{
Tom Parkin44046a52013-03-19 06:11:12 +00002113 struct udp_sock *up = udp_sk(sk);
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002114 bool slow = lock_sock_fast(sk);
David S. Millerdb8dac22008-03-06 16:22:02 -08002115 udp_flush_pending_frames(sk);
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002116 unlock_sock_fast(sk, slow);
Tom Parkin44046a52013-03-19 06:11:12 +00002117 if (static_key_false(&udp_encap_needed) && up->encap_type) {
2118 void (*encap_destroy)(struct sock *sk);
2119 encap_destroy = ACCESS_ONCE(up->encap_destroy);
2120 if (encap_destroy)
2121 encap_destroy(sk);
2122 }
David S. Millerdb8dac22008-03-06 16:22:02 -08002123}
2124
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125/*
2126 * Socket option code for UDP
2127 */
Gerrit Renker4c0a6cb2006-11-27 09:29:59 -08002128int udp_lib_setsockopt(struct sock *sk, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002129 char __user *optval, unsigned int optlen,
Gerrit Renker4c0a6cb2006-11-27 09:29:59 -08002130 int (*push_pending_frames)(struct sock *))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131{
2132 struct udp_sock *up = udp_sk(sk);
Tom Herbert1c194482014-05-23 08:47:32 -07002133 int val, valbool;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 int err = 0;
Wang Chenb2bf1e22007-12-03 22:34:16 +11002135 int is_udplite = IS_UDPLITE(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136
Eric Dumazetc482c562009-07-17 00:26:32 +00002137 if (optlen < sizeof(int))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 return -EINVAL;
2139
2140 if (get_user(val, (int __user *)optval))
2141 return -EFAULT;
2142
Tom Herbert1c194482014-05-23 08:47:32 -07002143 valbool = val ? 1 : 0;
2144
Stephen Hemminger6516c652007-03-08 20:41:55 -08002145 switch (optname) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 case UDP_CORK:
2147 if (val != 0) {
2148 up->corkflag = 1;
2149 } else {
2150 up->corkflag = 0;
2151 lock_sock(sk);
Joe Perches4243cdc2014-11-11 21:59:20 -08002152 push_pending_frames(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 release_sock(sk);
2154 }
2155 break;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002156
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 case UDP_ENCAP:
2158 switch (val) {
2159 case 0:
2160 case UDP_ENCAP_ESPINUDP:
2161 case UDP_ENCAP_ESPINUDP_NON_IKE:
James Chapman067b2072007-07-05 17:08:05 -07002162 up->encap_rcv = xfrm4_udp_encap_rcv;
2163 /* FALLTHROUGH */
James Chapman342f0232007-06-27 15:37:46 -07002164 case UDP_ENCAP_L2TPINUDP:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 up->encap_type = val;
Eric Dumazet447167b2012-04-11 23:05:28 +00002166 udp_encap_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 break;
2168 default:
2169 err = -ENOPROTOOPT;
2170 break;
2171 }
2172 break;
2173
Tom Herbert1c194482014-05-23 08:47:32 -07002174 case UDP_NO_CHECK6_TX:
2175 up->no_check6_tx = valbool;
2176 break;
2177
2178 case UDP_NO_CHECK6_RX:
2179 up->no_check6_rx = valbool;
2180 break;
2181
Gerrit Renkerba4e58e2006-11-27 11:10:57 -08002182 /*
2183 * UDP-Lite's partial checksum coverage (RFC 3828).
2184 */
2185 /* The sender sets the actual checksum coverage length via this option.
2186 * The case coverage > packet length is handled by the send module. */
2187 case UDPLITE_SEND_CSCOV:
Wang Chenb2bf1e22007-12-03 22:34:16 +11002188 if (!is_udplite) /* Disable the option on UDP sockets */
Gerrit Renkerba4e58e2006-11-27 11:10:57 -08002189 return -ENOPROTOOPT;
2190 if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
2191 val = 8;
Alexey Dobriyan4be929b2010-05-24 14:33:03 -07002192 else if (val > USHRT_MAX)
2193 val = USHRT_MAX;
Gerrit Renkerba4e58e2006-11-27 11:10:57 -08002194 up->pcslen = val;
2195 up->pcflag |= UDPLITE_SEND_CC;
2196 break;
2197
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002198 /* The receiver specifies a minimum checksum coverage value. To make
2199 * sense, this should be set to at least 8 (as done below). If zero is
Gerrit Renkerba4e58e2006-11-27 11:10:57 -08002200 * used, this again means full checksum coverage. */
2201 case UDPLITE_RECV_CSCOV:
Wang Chenb2bf1e22007-12-03 22:34:16 +11002202 if (!is_udplite) /* Disable the option on UDP sockets */
Gerrit Renkerba4e58e2006-11-27 11:10:57 -08002203 return -ENOPROTOOPT;
2204 if (val != 0 && val < 8) /* Avoid silly minimal values. */
2205 val = 8;
Alexey Dobriyan4be929b2010-05-24 14:33:03 -07002206 else if (val > USHRT_MAX)
2207 val = USHRT_MAX;
Gerrit Renkerba4e58e2006-11-27 11:10:57 -08002208 up->pcrlen = val;
2209 up->pcflag |= UDPLITE_RECV_CC;
2210 break;
2211
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212 default:
2213 err = -ENOPROTOOPT;
2214 break;
Stephen Hemminger6516c652007-03-08 20:41:55 -08002215 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216
2217 return err;
2218}
Eric Dumazetc482c562009-07-17 00:26:32 +00002219EXPORT_SYMBOL(udp_lib_setsockopt);
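
/* Illustrative userspace sketch of the UDP-Lite coverage options handled
 * above (values are examples; SOL_UDPLITE == IPPROTO_UDPLITE): the sender
 * checksums only the first 20 bytes, the receiver refuses anything less:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int cov = 20;
 *	setsockopt(fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
 *	setsockopt(fd, SOL_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
 */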
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220
David S. Millerdb8dac22008-03-06 16:22:02 -08002221int udp_setsockopt(struct sock *sk, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002222 char __user *optval, unsigned int optlen)
David S. Millerdb8dac22008-03-06 16:22:02 -08002223{
2224 if (level == SOL_UDP || level == SOL_UDPLITE)
2225 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
2226 udp_push_pending_frames);
2227 return ip_setsockopt(sk, level, optname, optval, optlen);
2228}
2229
2230#ifdef CONFIG_COMPAT
2231int compat_udp_setsockopt(struct sock *sk, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002232 char __user *optval, unsigned int optlen)
David S. Millerdb8dac22008-03-06 16:22:02 -08002233{
2234 if (level == SOL_UDP || level == SOL_UDPLITE)
2235 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
2236 udp_push_pending_frames);
2237 return compat_ip_setsockopt(sk, level, optname, optval, optlen);
2238}
2239#endif
2240
Gerrit Renker4c0a6cb2006-11-27 09:29:59 -08002241int udp_lib_getsockopt(struct sock *sk, int level, int optname,
2242 char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243{
2244 struct udp_sock *up = udp_sk(sk);
2245 int val, len;
2246
Eric Dumazetc482c562009-07-17 00:26:32 +00002247 if (get_user(len, optlen))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 return -EFAULT;
2249
2250 len = min_t(unsigned int, len, sizeof(int));
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002251
Stephen Hemminger6516c652007-03-08 20:41:55 -08002252 if (len < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253 return -EINVAL;
2254
Stephen Hemminger6516c652007-03-08 20:41:55 -08002255 switch (optname) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 case UDP_CORK:
2257 val = up->corkflag;
2258 break;
2259
2260 case UDP_ENCAP:
2261 val = up->encap_type;
2262 break;
2263
Tom Herbert1c194482014-05-23 08:47:32 -07002264 case UDP_NO_CHECK6_TX:
2265 val = up->no_check6_tx;
2266 break;
2267
2268 case UDP_NO_CHECK6_RX:
2269 val = up->no_check6_rx;
2270 break;
2271
Gerrit Renkerba4e58e2006-11-27 11:10:57 -08002272 /* The following two cannot be changed on UDP sockets; the return is
2273 * always 0 (which corresponds to the full checksum coverage of UDP). */
2274 case UDPLITE_SEND_CSCOV:
2275 val = up->pcslen;
2276 break;
2277
2278 case UDPLITE_RECV_CSCOV:
2279 val = up->pcrlen;
2280 break;
2281
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 default:
2283 return -ENOPROTOOPT;
Stephen Hemminger6516c652007-03-08 20:41:55 -08002284 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285
Stephen Hemminger6516c652007-03-08 20:41:55 -08002286 if (put_user(len, optlen))
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002287 return -EFAULT;
Eric Dumazetc482c562009-07-17 00:26:32 +00002288 if (copy_to_user(optval, &val, len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289 return -EFAULT;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002290 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291}
Eric Dumazetc482c562009-07-17 00:26:32 +00002292EXPORT_SYMBOL(udp_lib_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293
David S. Millerdb8dac22008-03-06 16:22:02 -08002294int udp_getsockopt(struct sock *sk, int level, int optname,
2295 char __user *optval, int __user *optlen)
2296{
2297 if (level == SOL_UDP || level == SOL_UDPLITE)
2298 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
2299 return ip_getsockopt(sk, level, optname, optval, optlen);
2300}
2301
2302#ifdef CONFIG_COMPAT
2303int compat_udp_getsockopt(struct sock *sk, int level, int optname,
2304 char __user *optval, int __user *optlen)
2305{
2306 if (level == SOL_UDP || level == SOL_UDPLITE)
2307 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
2308 return compat_ip_getsockopt(sk, level, optname, optval, optlen);
2309}
2310#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311/**
2312 * udp_poll - wait for a UDP event.
2313 * @file - file struct
2314 * @sock - socket
2315 * @wait - poll table
2316 *
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002317 * This is the same as datagram poll, except for the special case of
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318 * blocking sockets. If the application is using a blocking fd
2319 * and a packet with a checksum error is in the queue,
2320 * then it could get a return from select indicating data available
2321 * but then block when reading it. Add special case code
2322 * to work around these arguably broken applications.
2323 */
2324unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
2325{
2326 unsigned int mask = datagram_poll(file, sock, wait);
2327 struct sock *sk = sock->sk;
Gerrit Renkerba4e58e2006-11-27 11:10:57 -08002328
David Majnemerc3f1dba2013-05-31 13:15:38 +00002329 sock_rps_record_flow(sk);
2330
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331 /* Check for false positives due to checksum errors */
Eric Dumazet85584672009-10-09 04:43:40 +00002332 if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
2333 !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk))
2334 mask &= ~(POLLIN | POLLRDNORM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335
2336 return mask;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002337
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338}
Eric Dumazetc482c562009-07-17 00:26:32 +00002339EXPORT_SYMBOL(udp_poll);
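
/* Illustrative userspace sketch (hypothetical timeout): thanks to the
 * first_packet_length() re-check above, this common loop on a blocking
 * socket will not be told "readable" for a queue holding only frames
 * that are about to be dropped on bad checksum:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN))
 *		recv(fd, buf, sizeof(buf), 0);
 */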
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340
David S. Millerdb8dac22008-03-06 16:22:02 -08002341struct proto udp_prot = {
2342 .name = "UDP",
2343 .owner = THIS_MODULE,
2344 .close = udp_lib_close,
2345 .connect = ip4_datagram_connect,
2346 .disconnect = udp_disconnect,
2347 .ioctl = udp_ioctl,
2348 .destroy = udp_destroy_sock,
2349 .setsockopt = udp_setsockopt,
2350 .getsockopt = udp_getsockopt,
2351 .sendmsg = udp_sendmsg,
2352 .recvmsg = udp_recvmsg,
2353 .sendpage = udp_sendpage,
Herbert Xu93821772008-09-15 11:48:46 -07002354 .backlog_rcv = __udp_queue_rcv_skb,
Steffen Klassert8141ed92013-01-21 02:00:03 +00002355 .release_cb = ip4_datagram_release_cb,
David S. Millerdb8dac22008-03-06 16:22:02 -08002356 .hash = udp_lib_hash,
2357 .unhash = udp_lib_unhash,
Eric Dumazet719f8352010-09-08 05:08:44 +00002358 .rehash = udp_v4_rehash,
David S. Millerdb8dac22008-03-06 16:22:02 -08002359 .get_port = udp_v4_get_port,
2360 .memory_allocated = &udp_memory_allocated,
2361 .sysctl_mem = sysctl_udp_mem,
2362 .sysctl_wmem = &sysctl_udp_wmem_min,
2363 .sysctl_rmem = &sysctl_udp_rmem_min,
2364 .obj_size = sizeof(struct udp_sock),
Eric Dumazet271b72c2008-10-29 02:11:14 -07002365 .slab_flags = SLAB_DESTROY_BY_RCU,
Eric Dumazet645ca702008-10-29 01:41:45 -07002366 .h.udp_table = &udp_table,
David S. Millerdb8dac22008-03-06 16:22:02 -08002367#ifdef CONFIG_COMPAT
2368 .compat_setsockopt = compat_udp_setsockopt,
2369 .compat_getsockopt = compat_udp_getsockopt,
2370#endif
Octavian Purdilafcbdf092010-12-16 14:26:56 -08002371 .clear_sk = sk_prot_clear_portaddr_nulls,
David S. Millerdb8dac22008-03-06 16:22:02 -08002372};
Eric Dumazetc482c562009-07-17 00:26:32 +00002373EXPORT_SYMBOL(udp_prot);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374
2375/* ------------------------------------------------------------------------ */
2376#ifdef CONFIG_PROC_FS
2377
Eric Dumazet645ca702008-10-29 01:41:45 -07002378static struct sock *udp_get_first(struct seq_file *seq, int start)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379{
2380 struct sock *sk;
2381 struct udp_iter_state *state = seq->private;
Denis V. Lunev6f191ef2008-03-28 18:23:33 -07002382 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383
Eric Dumazetf86dcc52009-10-07 00:37:59 +00002384 for (state->bucket = start; state->bucket <= state->udp_table->mask;
2385 ++state->bucket) {
Eric Dumazet88ab1932008-11-16 19:39:21 -08002386 struct hlist_nulls_node *node;
Eric Dumazet645ca702008-10-29 01:41:45 -07002387 struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];
Eric Dumazetf86dcc52009-10-07 00:37:59 +00002388
2389 if (hlist_nulls_empty(&hslot->head))
2390 continue;
2391
Eric Dumazet645ca702008-10-29 01:41:45 -07002392 spin_lock_bh(&hslot->lock);
Eric Dumazet88ab1932008-11-16 19:39:21 -08002393 sk_nulls_for_each(sk, node, &hslot->head) {
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09002394 if (!net_eq(sock_net(sk), net))
Daniel Lezcanoa91275e2008-03-21 04:11:58 -07002395 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396 if (sk->sk_family == state->family)
2397 goto found;
2398 }
Eric Dumazet645ca702008-10-29 01:41:45 -07002399 spin_unlock_bh(&hslot->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400 }
2401 sk = NULL;
2402found:
2403 return sk;
2404}
2405
2406static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
2407{
2408 struct udp_iter_state *state = seq->private;
Denis V. Lunev6f191ef2008-03-28 18:23:33 -07002409 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410
2411 do {
Eric Dumazet88ab1932008-11-16 19:39:21 -08002412 sk = sk_nulls_next(sk);
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09002413 } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414
Eric Dumazet645ca702008-10-29 01:41:45 -07002415 if (!sk) {
Eric Dumazetf86dcc52009-10-07 00:37:59 +00002416 if (state->bucket <= state->udp_table->mask)
Vitaly Mayatskikh30842f22009-03-23 15:22:33 -07002417 spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
Eric Dumazet645ca702008-10-29 01:41:45 -07002418 return udp_get_first(seq, state->bucket + 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419 }
2420 return sk;
2421}
2422
2423static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
2424{
Eric Dumazet645ca702008-10-29 01:41:45 -07002425 struct sock *sk = udp_get_first(seq, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426
2427 if (sk)
Stephen Hemminger6516c652007-03-08 20:41:55 -08002428 while (pos && (sk = udp_get_next(seq, sk)) != NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 --pos;
2430 return pos ? NULL : sk;
2431}
2432
2433static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
2434{
Vitaly Mayatskikh30842f22009-03-23 15:22:33 -07002435 struct udp_iter_state *state = seq->private;
Eric Dumazetf86dcc52009-10-07 00:37:59 +00002436 state->bucket = MAX_UDP_PORTS;
Vitaly Mayatskikh30842f22009-03-23 15:22:33 -07002437
YOSHIFUJI Hideakib50660f2008-03-31 19:38:15 -07002438 return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439}
2440
2441static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2442{
2443 struct sock *sk;
2444
YOSHIFUJI Hideakib50660f2008-03-31 19:38:15 -07002445 if (v == SEQ_START_TOKEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 sk = udp_get_idx(seq, 0);
2447 else
2448 sk = udp_get_next(seq, v);
2449
2450 ++*pos;
2451 return sk;
2452}
2453
2454static void udp_seq_stop(struct seq_file *seq, void *v)
2455{
Eric Dumazet645ca702008-10-29 01:41:45 -07002456 struct udp_iter_state *state = seq->private;
2457
Eric Dumazetf86dcc52009-10-07 00:37:59 +00002458 if (state->bucket <= state->udp_table->mask)
Eric Dumazet645ca702008-10-29 01:41:45 -07002459 spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460}
2461
Arjan van de Ven73cb88e2011-10-30 06:46:30 +00002462int udp_seq_open(struct inode *inode, struct file *file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463{
Al Virod9dda782013-03-31 18:16:14 -04002464 struct udp_seq_afinfo *afinfo = PDE_DATA(inode);
Denis V. Luneva2be75c2008-03-28 18:25:06 -07002465 struct udp_iter_state *s;
2466 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467
Denis V. Luneva2be75c2008-03-28 18:25:06 -07002468 err = seq_open_net(inode, file, &afinfo->seq_ops,
2469 sizeof(struct udp_iter_state));
2470 if (err < 0)
2471 return err;
Daniel Lezcanoa91275e2008-03-21 04:11:58 -07002472
Denis V. Luneva2be75c2008-03-28 18:25:06 -07002473 s = ((struct seq_file *)file->private_data)->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 s->family = afinfo->family;
Eric Dumazet645ca702008-10-29 01:41:45 -07002475 s->udp_table = afinfo->udp_table;
Denis V. Luneva2be75c2008-03-28 18:25:06 -07002476 return err;
Daniel Lezcanoa91275e2008-03-21 04:11:58 -07002477}
Arjan van de Ven73cb88e2011-10-30 06:46:30 +00002478EXPORT_SYMBOL(udp_seq_open);
Daniel Lezcanoa91275e2008-03-21 04:11:58 -07002479
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480/* ------------------------------------------------------------------------ */
Daniel Lezcano0c96d8c2008-03-21 04:14:17 -07002481int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482{
2483 struct proc_dir_entry *p;
2484 int rc = 0;
2485
Denis V. Lunevdda61922008-03-28 18:24:26 -07002486 afinfo->seq_ops.start = udp_seq_start;
2487 afinfo->seq_ops.next = udp_seq_next;
2488 afinfo->seq_ops.stop = udp_seq_stop;
2489
Denis V. Lunev84841c32008-05-02 04:10:08 -07002490 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
Arjan van de Ven73cb88e2011-10-30 06:46:30 +00002491 afinfo->seq_fops, afinfo);
Denis V. Lunev84841c32008-05-02 04:10:08 -07002492 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493 rc = -ENOMEM;
2494 return rc;
2495}
Eric Dumazetc482c562009-07-17 00:26:32 +00002496EXPORT_SYMBOL(udp_proc_register);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497
Daniel Lezcano0c96d8c2008-03-21 04:14:17 -07002498void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499{
Gao fengece31ff2013-02-18 01:34:56 +00002500 remove_proc_entry(afinfo->name, net->proc_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501}
Eric Dumazetc482c562009-07-17 00:26:32 +00002502EXPORT_SYMBOL(udp_proc_unregister);
David S. Millerdb8dac22008-03-06 16:22:02 -08002503
2504/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
			     int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src  = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp  = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		   " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
		   bucket, src, srcp, dest, destp, sp->sk_state,
		   sk_wmem_alloc_get(sp),
		   sk_rmem_alloc_get(sp),
		   0, 0L, 0,
		   from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		   0, sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   atomic_read(&sp->sk_drops));
}

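/*
 * seq_file ->show() callback: print the column header on the
 * SEQ_START_TOKEN pass, otherwise one socket record.  seq_setwidth()
 * plus seq_pad() space-pad every record to 127 characters before the
 * '\n', giving userspace fixed-length lines.
 */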
int udp4_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			 "rx_queue tr tm->when retrnsmt   uid  timeout "
			 "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}

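/*
 * All the heavy lifting is done by the generic seq_file helpers;
 * only ->open() is protocol specific.
 */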
static const struct file_operations udp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = udp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

/* ------------------------------------------------------------------------ */
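/*
 * Glue binding the AF_INET instance together: table, file_operations
 * and the ->show() callback.  udp_proc_register() fills in the
 * start/next/stop iterator ops at registration time.
 */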
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.name      = "udp",
	.family    = AF_INET,
	.udp_table = &udp_table,
	.seq_fops  = &udp_afinfo_seq_fops,
	.seq_ops   = {
		.show = udp4_seq_show,
	},
};

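/*
 * Per-network-namespace hooks: every namespace gets (and loses) its
 * own /proc/net/udp entry as it is created and destroyed.
 */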
static int __net_init udp4_proc_init_net(struct net *net)
{
	return udp_proc_register(net, &udp4_seq_afinfo);
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
	udp_proc_unregister(net, &udp4_seq_afinfo);
}

static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

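/*
 * "uhash_entries=N" kernel boot parameter: override the automatic
 * UDP hash table sizing.  Nonzero values below UDP_HTABLE_SIZE_MIN
 * are rounded up to the minimum; 0 or an unparseable value keeps the
 * memory-based default.  E.g. booting with "uhash_entries=4096"
 * requests a 4096-slot table.
 */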
static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &uhash_entries);
	if (ret)
		return 0;

	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;
	return 1;
}
__setup("uhash_entries=", set_uhash_entries);

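/*
 * Allocate the two parallel hash tables from one
 * alloc_large_system_hash() block (hence the doubled bucket size):
 * ->hash is keyed by local port alone, ->hash2 by (local address,
 * local port) to cut down scans for sockets bound to a specific
 * address.  Table size comes from uhash_entries when set, otherwise
 * it scales with memory, clamped to [UDP_HTABLE_SIZE_MIN, 64K] slots.
 */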
void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i;

	table->hash = alloc_large_system_hash(name,
					      2 * sizeof(struct udp_hslot),
					      uhash_entries,
					      21, /* one slot per 2 MB */
					      0,
					      &table->log,
					      &table->mask,
					      UDP_HTABLE_SIZE_MIN,
					      64 * 1024);

	table->hash2 = table->hash + (table->mask + 1);
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_NULLS_HEAD(&table->hash2[i].head, i);
		table->hash2[i].count = 0;
		spin_lock_init(&table->hash2[i].lock);
	}
}

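/*
 * Per-boot random seed for UDP flow hashing (used e.g. when picking
 * tunnel source ports); net_get_random_once() initialises it exactly
 * once, on first use.
 */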
u32 udp_flow_hashrnd(void)
{
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);

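/*
 * Boot-time UDP setup: create the global hash table and derive the
 * default udp_mem pressure limits from free memory.  All three values
 * are in pages: with limit = nr_free_buffer_pages() / 8 (at least 128
 * pages), udp_mem becomes { 3/4 * limit, limit, 3/2 * limit }.
 */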
void __init udp_init(void)
{
	unsigned long limit;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	sysctl_udp_wmem_min = SK_MEM_QUANTUM;
}