/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *	Arnt Gulbrandsen	:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *	Willy Konynenberg	:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *	Hirokazu Takahashi	:	HW checksumming for outgoing UDP
 *					datagrams.
 *	Hirokazu Takahashi	:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "UDP: " fmt

#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"

struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

int sysctl_udp_rmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_rmem_min);

int sysctl_udp_wmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_wmem_min);

atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)

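/*
 * Scan one primary-hash chain for conflicts with (sk, num).  Two modes
 * (an added annotation, describing the code below): with a bitmap, every
 * in-use port found in the chain is recorded so one scan can serve a whole
 * free-port hunt; without one, the first real conflict returns 1
 * immediately.  SO_REUSEADDR, SO_REUSEPORT (same-uid only) and
 * bound-to-device rules decide what counts as a conflict.
 */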
static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk,
			       int (*saddr_comp)(const struct sock *sk1,
						 const struct sock *sk2),
			       unsigned int log)
{
	struct sock *sk2;
	struct hlist_nulls_node *node;
	kuid_t uid = sock_i_uid(sk);

	sk_nulls_for_each(sk2, node, &hslot->head)
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    (!sk2->sk_reuseport || !sk->sk_reuseport ||
		     !uid_eq(uid, sock_i_uid(sk2))) &&
		    (*saddr_comp)(sk, sk2)) {
			if (bitmap)
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			else
				return 1;
		}
	return 0;
}

/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk,
				int (*saddr_comp)(const struct sock *sk1,
						  const struct sock *sk2))
{
	struct sock *sk2;
	struct hlist_nulls_node *node;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, node, &hslot2->head)
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    (!sk2->sk_reuseport || !sk->sk_reuseport ||
		     !uid_eq(uid, sock_i_uid(sk2))) &&
		    (*saddr_comp)(sk, sk2)) {
			res = 1;
			break;
		}
	spin_unlock(&hslot2->lock);
	return res;
}

/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     int (*saddr_comp)(const struct sock *sk1,
				       const struct sock *sk2),
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int    error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		int low, high, remaining;
		unsigned int rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		rand = prandom_u32();
		first = (((u64)rand * remaining) >> 32) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    saddr_comp, udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * give us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_reserved_local_port(snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2,
						     sk, saddr_comp);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk, saddr_comp);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
					saddr_comp, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		sk_nulls_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					 &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);

static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
{
	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);

	return (!ipv6_only_sock(sk2) &&
		(!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr ||
		 inet1->inet_rcv_saddr == inet2->inet_rcv_saddr));
}

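/*
 * Secondary (address, port) hash used by the hash2 chains: the local
 * address is mixed with a per-netns seed, then the port is folded in
 * with XOR.  Because the port contribution is a plain XOR, a partial
 * hash can be precomputed with port 0 and fixed up later by XORing in
 * the port actually chosen (see udp_v4_get_port()/udp_lib_get_port()).
 */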
static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
				       unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
}

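/*
 * Score a socket against an incoming 4-tuple for the primary-hash lookup.
 * A mismatch on any field the socket has actually bound or connected
 * disqualifies it (-1); each exact match adds to the score, so a fully
 * connected socket beats a wildcard one.  AF_INET sockets start with a
 * small edge over dual-stack IPv6 sockets sharing the port.
 */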
static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
				unsigned short hnum,
				__be16 sport, __be32 daddr, __be16 dport, int dif)
{
	int score = -1;

	if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
			!ipv6_only_sock(sk)) {
		struct inet_sock *inet = inet_sk(sk);

		score = (sk->sk_family == PF_INET ? 2 : 1);
		if (inet->inet_rcv_saddr) {
			if (inet->inet_rcv_saddr != daddr)
				return -1;
			score += 4;
		}
		if (inet->inet_daddr) {
			if (inet->inet_daddr != saddr)
				return -1;
			score += 4;
		}
		if (inet->inet_dport) {
			if (inet->inet_dport != sport)
				return -1;
			score += 4;
		}
		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
				return -1;
			score += 4;
		}
	}
	return score;
}

/*
 * In this second variant, we check (daddr, dport) matches (inet_rcv_saddr, inet_num)
 */
static inline int compute_score2(struct sock *sk, struct net *net,
				 __be32 saddr, __be16 sport,
				 __be32 daddr, unsigned int hnum, int dif)
{
	int score = -1;

	if (net_eq(sock_net(sk), net) && !ipv6_only_sock(sk)) {
		struct inet_sock *inet = inet_sk(sk);

		if (inet->inet_rcv_saddr != daddr)
			return -1;
		if (inet->inet_num != hnum)
			return -1;

		score = (sk->sk_family == PF_INET ? 2 : 1);
		if (inet->inet_daddr) {
			if (inet->inet_daddr != saddr)
				return -1;
			score += 4;
		}
		if (inet->inet_dport) {
			if (inet->inet_dport != sport)
				return -1;
			score += 4;
		}
		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
				return -1;
			score += 4;
		}
	}
	return score;
}

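/*
 * Flow hash used to pick one socket out of an SO_REUSEPORT group.
 * The secret is generated on first use via net_get_random_once(), so
 * no entropy is consumed unless a reuseport group actually exists.
 */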
static unsigned int udp_ehashfn(struct net *net, const __be32 laddr,
				const __u16 lport, const __be32 faddr,
				const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}


/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
		__be32 saddr, __be16 sport,
		__be32 daddr, unsigned int hnum, int dif,
		struct udp_hslot *hslot2, unsigned int slot2)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

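	/*
	 * Equal-scoring SO_REUSEPORT sockets are balanced below by
	 * reservoir sampling: on the n-th tie the candidate is replaced
	 * when ((u64)hash * matches) >> 32 == 0, i.e. with probability
	 * roughly 1/n, so each group member is about equally likely to
	 * win.  The refcount is taken locklessly at the end and the
	 * score rechecked in case the chain changed under us.
	 */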
begin:
	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
		score = compute_score2(sk, net, saddr, sport,
				      daddr, hnum, dif);
		if (score > badness) {
			result = sk;
			badness = score;
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				matches = 1;
			}
		} else if (score == badness && reuseport) {
			matches++;
			if (((u64)hash * matches) >> 32 == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot2)
		goto begin;
	if (result) {
		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
			result = NULL;
		else if (unlikely(compute_score2(result, net, saddr, sport,
				  daddr, hnum, dif) < badness)) {
			sock_put(result);
			goto begin;
		}
	}
	return result;
}

/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
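/*
 * Lookup strategy: sockets live in two tables, hash[] keyed on local
 * port only and hash2[] keyed on (local address, port).  Primary chains
 * longer than 10 entries are dodged by retrying in the usually much
 * shorter hash2 chain, first for the exact destination address, then
 * for INADDR_ANY.
 */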
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport,
		int dif, struct udp_table *udptable)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	rcu_read_lock();
	if (hslot->count > 10) {
		hash2 = udp4_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp4_lib_lookup2(net, saddr, sport,
					  daddr, hnum, dif,
					  hslot2, slot2);
		if (!result) {
			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
			slot2 = hash2 & udptable->mask;
			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

			result = udp4_lib_lookup2(net, saddr, sport,
						  htonl(INADDR_ANY), hnum, dif,
						  hslot2, slot2);
		}
		rcu_read_unlock();
		return result;
	}
begin:
	result = NULL;
	badness = 0;
	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
		score = compute_score(sk, net, saddr, hnum, sport,
				      daddr, dport, dif);
		if (score > badness) {
			result = sk;
			badness = score;
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				matches = 1;
			}
		} else if (score == badness && reuseport) {
			matches++;
			if (((u64)hash * matches) >> 32 == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;

	if (result) {
		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
			result = NULL;
		else if (unlikely(compute_score(result, net, saddr, hnum, sport,
				  daddr, dport, dif) < badness)) {
			sock_put(result);
			goto begin;
		}
	}
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);

static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 udptable);
}

struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);

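/*
 * True if sk may receive a multicast/broadcast datagram matching this
 * (address, port, device) tuple; any IGMPv3 source filters configured
 * on the socket are applied as the final check.
 */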
static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
		return false;
	return true;
}

static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
					     __be16 loc_port, __be32 loc_addr,
					     __be16 rmt_port, __be32 rmt_addr,
					     int dif)
{
	struct hlist_nulls_node *node;
	struct sock *s = sk;
	unsigned short hnum = ntohs(loc_port);

	sk_nulls_for_each_from(s, node) {
		if (__udp_is_mcast_sock(net, s,
					loc_port, loc_addr,
					rmt_port, rmt_addr,
					dif, hnum))
			goto found;
	}
	s = NULL;
found:
	return s;
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			iph->saddr, uh->source, skb->dev->ifindex, udptable);
	if (sk == NULL) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 * RFC1122: OK.  Passes ICMP errors back to application, as per
	 * 4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	sock_put(sk);
}

void udp_err(struct sk_buff *skb, u32 info)
{
	__udp4_lib_err(skb, info, &udp_table);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);

/**
 * 	udp4_hwcsum  -  handle outgoing HW checksumming
 * 	@skb: 	sk_buff containing the filled-in UDP header
 * 	        (checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!frags) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		do {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		} while ((frags = frags->next));

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);

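/*
 * Fill in the UDP header of an already-built skb and hand it to the IP
 * layer.  Four checksum strategies are possible: UDP-Lite partial
 * coverage, checksum disabled (SO_NO_CHECK), offload to hardware via
 * CHECKSUM_PARTIAL, or a full software checksum computed here.
 */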
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (is_udplite)				 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {   /* UDP csum disabled */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
			UDP_INC_STATS_USER(sock_net(sk),
					   UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS_USER(sock_net(sk),
				   UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);

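/*
 * sendmsg() on a UDP socket.  The common, uncorked case builds the
 * whole datagram locklessly with ip_make_skb() and sends it straight
 * away; corked sockets (UDP_CORK / MSG_MORE) fall back to appending
 * under the socket lock and are flushed by udp_push_pending_frames().
 */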
int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8  tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;
	ipc.tx_flags = 0;
	ipc.ttl = 0;
	ipc.tos = -1;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}
	ipc.addr = inet->inet_saddr;

	ipc.oif = sk->sk_bound_dev_if;

	sock_tx_timestamp(sk, &ipc.tx_flags);

	if (msg->msg_controllen) {
		err = ip_cmsg_send(sock_net(sk), msg, &ipc,
				   sk->sk_family == AF_INET6);
		if (err)
			return err;
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = get_rttos(&ipc, inet);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	} else if (!ipc.oif)
		ipc.oif = inet->uc_index;

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (rt == NULL) {
		struct net *net = sock_net(sk);

		fl4 = &fl4_stack;
		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
				   inet_sk_flowi_flags(sk),
				   faddr, saddr, dport, inet->inet_sport);

		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			if (err == -ENETUNREACH)
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = fl4->saddr;
	if (!ipc.addr)
		daddr = ipc.addr = fl4->daddr;

	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
		skb = ip_make_skb(sk, fl4, getfrag, msg->msg_iov, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  msg->msg_flags);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_send_skb(skb, fl4);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("cork app bug 2\n"));
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, fl4, getfrag, msg->msg_iov, ulen,
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS_USER(sock_net(sk),
				UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	dst_confirm(&rt->dst);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udp_sendmsg);

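/*
 * sendpage() support: an empty udp_sendmsg() call corks the socket and
 * sets up the destination (so this only works when connected), after
 * which pages are appended without copying and pushed out unless
 * MSG_MORE keeps the cork in place.
 */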
1091int udp_sendpage(struct sock *sk, struct page *page, int offset,
1092 size_t size, int flags)
1093{
David S. Millerf5fca602011-05-08 17:24:10 -07001094 struct inet_sock *inet = inet_sk(sk);
David S. Millerdb8dac22008-03-06 16:22:02 -08001095 struct udp_sock *up = udp_sk(sk);
1096 int ret;
1097
Shawn Landdend3f7d562013-11-24 22:36:28 -08001098 if (flags & MSG_SENDPAGE_NOTLAST)
1099 flags |= MSG_MORE;
1100
David S. Millerdb8dac22008-03-06 16:22:02 -08001101 if (!up->pending) {
1102 struct msghdr msg = { .msg_flags = flags|MSG_MORE };
1103
1104 /* Call udp_sendmsg to specify destination address which
1105 * sendpage interface can't pass.
1106 * This will succeed only when the socket is connected.
1107 */
1108 ret = udp_sendmsg(NULL, sk, &msg, 0);
1109 if (ret < 0)
1110 return ret;
1111 }
1112
1113 lock_sock(sk);
1114
1115 if (unlikely(!up->pending)) {
1116 release_sock(sk);
1117
Joe Perchesafd465032012-03-12 07:03:32 +00001118 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("udp cork app bug 3\n"));
David S. Millerdb8dac22008-03-06 16:22:02 -08001119 return -EINVAL;
1120 }
1121
David S. Millerf5fca602011-05-08 17:24:10 -07001122 ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
1123 page, offset, size, flags);
David S. Millerdb8dac22008-03-06 16:22:02 -08001124 if (ret == -EOPNOTSUPP) {
1125 release_sock(sk);
1126 return sock_no_sendpage(sk->sk_socket, page, offset,
1127 size, flags);
1128 }
1129 if (ret < 0) {
1130 udp_flush_pending_frames(sk);
1131 goto out;
1132 }
1133
1134 up->len += size;
1135 if (!(up->corkflag || (flags&MSG_MORE)))
1136 ret = udp_push_pending_frames(sk);
1137 if (!ret)
1138 ret = size;
1139out:
1140 release_sock(sk);
1141 return ret;
1142}
1143
Eric Dumazet85584672009-10-09 04:43:40 +00001144
1145/**
1146 * first_packet_length - return length of first packet in receive queue
1147 * @sk: socket
1148 *
1149 * Drops all bad checksum frames, until a valid one is found.
1150 * Returns the length of found skb, or 0 if none is found.
1151 */
1152static unsigned int first_packet_length(struct sock *sk)
1153{
1154 struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
1155 struct sk_buff *skb;
1156 unsigned int res;
1157
1158 __skb_queue_head_init(&list_kill);
1159
1160 spin_lock_bh(&rcvq->lock);
1161 while ((skb = skb_peek(rcvq)) != NULL &&
1162 udp_lib_checksum_complete(skb)) {
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001163 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS,
1164 IS_UDPLITE(sk));
Eric Dumazet85584672009-10-09 04:43:40 +00001165 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
1166 IS_UDPLITE(sk));
Eric Dumazet8edf19c2009-10-15 00:12:40 +00001167 atomic_inc(&sk->sk_drops);
Eric Dumazet85584672009-10-09 04:43:40 +00001168 __skb_unlink(skb, rcvq);
1169 __skb_queue_tail(&list_kill, skb);
1170 }
1171 res = skb ? skb->len : 0;
1172 spin_unlock_bh(&rcvq->lock);
1173
1174 if (!skb_queue_empty(&list_kill)) {
Eric Dumazet8a74ad62010-05-26 19:20:18 +00001175 bool slow = lock_sock_fast(sk);
1176
Eric Dumazet85584672009-10-09 04:43:40 +00001177 __skb_queue_purge(&list_kill);
1178 sk_mem_reclaim_partial(sk);
Eric Dumazet8a74ad62010-05-26 19:20:18 +00001179 unlock_sock_fast(sk, slow);
Eric Dumazet85584672009-10-09 04:43:40 +00001180 }
1181 return res;
1182}
1183
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184/*
1185 * IOCTL requests applicable to the UDP protocol
1186 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001187
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
1189{
Stephen Hemminger6516c652007-03-08 20:41:55 -08001190 switch (cmd) {
1191 case SIOCOUTQ:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192 {
Eric Dumazet31e6d362009-06-17 19:05:41 -07001193 int amount = sk_wmem_alloc_get(sk);
1194
Stephen Hemminger6516c652007-03-08 20:41:55 -08001195 return put_user(amount, (int __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196 }
Stephen Hemminger6516c652007-03-08 20:41:55 -08001197
1198 case SIOCINQ:
1199 {
Eric Dumazet85584672009-10-09 04:43:40 +00001200 unsigned int amount = first_packet_length(sk);
Stephen Hemminger6516c652007-03-08 20:41:55 -08001201
Eric Dumazet85584672009-10-09 04:43:40 +00001202 if (amount)
Stephen Hemminger6516c652007-03-08 20:41:55 -08001203 /*
1204 * We will only return the amount
1205 * of this packet since that is all
1206 * that will be read.
1207 */
Eric Dumazet85584672009-10-09 04:43:40 +00001208 amount -= sizeof(struct udphdr);
1209
Stephen Hemminger6516c652007-03-08 20:41:55 -08001210 return put_user(amount, (int __user *)arg);
1211 }
1212
1213 default:
1214 return -ENOIOCTLCMD;
1215 }
1216
1217 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218}
Eric Dumazetc482c562009-07-17 00:26:32 +00001219EXPORT_SYMBOL(udp_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220
/*
 *	This should be easy: if there is something there we
 *	return it, otherwise we block.
 */

int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int noblock, int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int peeked, off = 0;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	bool slow;

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

try_again:
	skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				  &peeked, &off, &err);
	if (!skb)
		goto out;

	ulen = skb->len - sizeof(struct udphdr);
	copied = len;
	if (copied > ulen)
		copied = ulen;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
		if (udp_lib_checksum_complete(skb))
			goto csum_copy_err;
	}

	if (skb_csum_unnecessary(skb))
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
					      msg->msg_iov, copied);
	else {
		err = skb_copy_and_csum_datagram_iovec(skb,
						       sizeof(struct udphdr),
						       msg->msg_iov);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (unlikely(err)) {
		trace_kfree_skb(skb, udp_recvmsg);
		if (!peeked) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS_USER(sock_net(sk),
					   UDP_MIB_INERRORS, is_udplite);
		}
		goto out_free;
	}

	if (!peeked)
		UDP_INC_STATS_USER(sock_net(sk),
				   UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv(msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

out_free:
	skb_free_datagram_locked(sk, skb);
out:
	return err;

csum_copy_err:
	slow = lock_sock_fast(sk);
	if (!skb_kill_datagram(sk, skb, flags)) {
		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	}
	unlock_sock_fast(sk, slow);

	if (noblock)
		return -EAGAIN;

	/* starting over for a new packet */
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

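/* Example (illustrative only, not part of this file): the MSG_TRUNC
 * handling above is what userspace observes when its buffer is smaller
 * than the datagram.  A minimal sketch:
 *
 *	char buf[64];
 *	ssize_t n = recv(fd, buf, sizeof(buf), MSG_TRUNC);
 *	// n is the full datagram length (ulen above), even though at
 *	// most sizeof(buf) bytes were copied into buf.
 *
 * Without MSG_TRUNC in flags, recv() returns the truncated length and
 * recvmsg() reports MSG_TRUNC in msg_flags instead.
 */
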
int udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(udp_disconnect);

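/* Example (illustrative only): userspace reaches udp_disconnect() by
 * connecting the socket to an AF_UNSPEC address, which breaks the
 * association as described above:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(fd, &sa, sizeof(sa));	// sk_state returns to TCP_CLOSE
 */
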
void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2;

		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (sk_nulls_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_SYMBOL(udp_lib_unhash);

/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;
		if (hslot2 != nhslot2) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);

			spin_lock(&hslot2->lock);
			hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);

			spin_lock(&nhslot2->lock);
			hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						 &nhslot2->head);
			nhslot2->count++;
			spin_unlock(&nhslot2->lock);

			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_SYMBOL(udp_lib_rehash);

static void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = udp4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	udp_lib_rehash(sk, new_hash);
}

static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
	}

	rc = sock_queue_rcv_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					 is_udplite);
		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}

static struct static_key udp_encap_needed __read_mostly;
void udp_encap_enable(void)
{
	if (!static_key_enabled(&udp_encap_needed))
		static_key_slow_inc(&udp_encap_needed);
}
EXPORT_SYMBOL(udp_encap_enable);

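/* Sketch (illustrative only) of how an encapsulation user such as L2TP
 * arms the static key above; the hook name is hypothetical, but the
 * pattern follows existing in-tree encap users:
 *
 *	udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
 *	udp_sk(sk)->encap_rcv  = my_encap_recv;	// hypothetical hook
 *	udp_encap_enable();
 *
 * After this, udp_queue_rcv_skb() below hands matching skbs to
 * my_encap_recv() before normal UDP delivery.
 */
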
/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int rc;
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (static_key_false(&udp_encap_needed) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = ACCESS_ONCE(up->encap_rcv);
		if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
			int ret;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				UDP_INC_STATS_BH(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			LIMIT_NETDEBUG(KERN_WARNING "UDPLite: partial coverage %d while full coverage %d requested\n",
				       UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			LIMIT_NETDEBUG(KERN_WARNING "UDPLite: coverage %d too small, need min %d\n",
				       UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
		goto drop;

	rc = 0;

	ipv4_pktinfo_prepare(sk, skb);
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = __udp_queue_rcv_skb(sk, skb);
	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		goto drop;
	}
	bh_unlock_sock(sk);

	return rc;

csum_error:
	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}

static void flush_stack(struct sock **stack, unsigned int count,
			struct sk_buff *skb, unsigned int final)
{
	unsigned int i;
	struct sk_buff *skb1 = NULL;
	struct sock *sk;

	for (i = 0; i < count; i++) {
		sk = stack[i];
		if (likely(skb1 == NULL))
			skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);

		if (!skb1) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
		}

		if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
			skb1 = NULL;
	}
	if (unlikely(skb1))
		kfree_skb(skb1);
}

/* For TCP sockets, sk_rx_dst is protected by socket lock
 * For UDP, we use xchg() to guard against concurrent changes.
 */
static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	dst_hold(dst);
	old = xchg(&sk->sk_rx_dst, dst);
	dst_release(old);
}

/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable)
{
	struct sock *sk, *stack[256 / sizeof(struct sock *)];
	struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
	int dif;
	unsigned int i, count = 0;

	spin_lock(&hslot->lock);
	sk = sk_nulls_head(&hslot->head);
	dif = skb->dev->ifindex;
	sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
	while (sk) {
		stack[count++] = sk;
		sk = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
				       daddr, uh->source, saddr, dif);
		if (unlikely(count == ARRAY_SIZE(stack))) {
			if (!sk)
				break;
			flush_stack(stack, count, skb, ~0);
			count = 0;
		}
	}
	/*
	 * before releasing chain lock, we must take a reference on sockets
	 */
	for (i = 0; i < count; i++)
		sock_hold(stack[i]);

	spin_unlock(&hslot->lock);

	/*
	 * do the slow work with no lock held
	 */
	if (count) {
		flush_stack(stack, count, skb, count - 1);

		for (i = 0; i < count; i++)
			sock_put(stack[i]);
	} else {
		kfree_skb(skb);
	}
	return 0;
}

/* Initialize UDP checksum. If it exits with zero (success),
 * CHECKSUM_UNNECESSARY means that no more checks are required.
 * Otherwise, csum completion requires checksumming the packet body,
 * including the udp header, and folding it into skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
	}

	return skb_checksum_init_zero_check(skb, proto, uh->check,
					    inet_compute_pseudo);
}

/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = skb_rtable(skb);
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	uh   = udp_hdr(skb);
	ulen = ntohs(uh->len);
	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp_sk_rx_dst_set(sk, dst);

		ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);
		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	} else {
		if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
			return __udp4_lib_mcast_deliver(net, skb, uh,
					saddr, daddr, udptable);

		sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	}

	if (sk != NULL) {
		int ret;

		ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket. Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet to a port to which we
	 * don't want to listen.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
		       proto == IPPROTO_UDPLITE ? "Lite" : "",
		       &saddr, ntohs(uh->source),
		       ulen, skb->len,
		       &daddr, ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
		       proto == IPPROTO_UDPLITE ? "Lite" : "",
		       &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
		       ulen);
	UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
	UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}

/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket is found, return NULL.
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
						  __be16 loc_port, __be32 loc_addr,
						  __be16 rmt_port, __be32 rmt_addr,
						  int dif)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	unsigned short hnum = ntohs(loc_port);
	unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask);
	struct udp_hslot *hslot = &udp_table.hash[slot];

	rcu_read_lock();
begin:
	count = 0;
	result = NULL;
	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
		if (__udp_is_mcast_sock(net, sk,
					loc_port, loc_addr,
					rmt_port, rmt_addr,
					dif, hnum)) {
			result = sk;
			++count;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;

	if (result) {
		if (count != 1 ||
		    unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
			result = NULL;
		else if (unlikely(!__udp_is_mcast_sock(net, result,
						       loc_port, loc_addr,
						       rmt_port, rmt_addr,
						       dif, hnum))) {
			sock_put(result);
			result = NULL;
		}
	}
	rcu_read_unlock();
	return result;
}

/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
					    __be16 loc_port, __be32 loc_addr,
					    __be16 rmt_port, __be32 rmt_addr,
					    int dif)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr)
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);

	rcu_read_lock();
	result = NULL;
	udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
		if (INET_MATCH(sk, net, acookie,
			       rmt_addr, loc_addr, ports, dif))
			result = sk;
		/* Only check first socket in chain */
		break;
	}

	if (result) {
		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
			result = NULL;
		else if (unlikely(!INET_MATCH(sk, net, acookie,
					      rmt_addr, loc_addr,
					      ports, dif))) {
			sock_put(result);
			result = NULL;
		}
	}
	rcu_read_unlock();
	return result;
}

void udp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph;
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;

	/* validate the packet */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
		return;

	iph = ip_hdr(skb);
	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_BROADCAST ||
	    skb->pkt_type == PACKET_MULTICAST)
		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
						   uh->source, iph->saddr, dif);
	else if (skb->pkt_type == PACKET_HOST)
		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
					     uh->source, iph->saddr, dif);
	else
		return;

	if (!sk)
		return;

	skb->sk = sk;
	skb->destructor = sock_edemux;
	dst = sk->sk_rx_dst;

	if (dst)
		dst = dst_check(dst, 0);
	if (dst)
		skb_dst_set_noref(skb, dst);
}

int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

void udp_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	bool slow = lock_sock_fast(sk);
	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
	if (static_key_false(&udp_encap_needed) && up->encap_type) {
		void (*encap_destroy)(struct sock *sk);
		encap_destroy = ACCESS_ONCE(up->encap_destroy);
		if (encap_destroy)
			encap_destroy(sk);
	}
}

/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			(*push_pending_frames)(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			udp_encap_enable();
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	/*
	 *	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage.                     */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);

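/* Example (illustrative only): userspace view of the UDP_CORK case
 * handled above.  While corked, consecutive sends accumulate into a
 * single datagram; uncorking pushes the pending frames:
 *
 *	int on = 1, off = 0;
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *	send(fd, "hello ", 6, 0);
 *	send(fd, "world", 5, 0);
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
 *	// the receiver sees one 11-byte datagram, not two
 */
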
int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif

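/* Example (illustrative only): setting the UDP-Lite coverage options
 * handled by udp_lib_setsockopt() above from userspace:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int scov = 20;	// checksum only the first 20 bytes we send
 *	int rcov = 20;	// require senders to cover at least 20 bytes
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &scov, sizeof(scov));
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &rcov, sizeof(rcov));
 *
 * Values 1..7 are rounded up to 8 above, matching RFC 3828's minimum
 * coverage of the UDP-Lite header itself.
 */
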
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);

int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif
/**
 *	udp_poll - wait for a UDP event.
 *	@file - file struct
 *	@sock - socket
 *	@wait - poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets. If the application is using a blocking fd
 *	and a packet with a checksum error is in the queue,
 *	then it could get a return from select indicating data available
 *	but then block when reading it. Add special case code
 *	to work around these arguably broken applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;

	sock_rps_record_flow(sk);

	/* Check for false positives due to checksum errors */
	if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk))
		mask &= ~(POLLIN | POLLRDNORM);

	return mask;
}
EXPORT_SYMBOL(udp_poll);

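/* Example (illustrative only) of the arguably broken pattern that the
 * comment above describes: a blocking reader that trusts poll():
 *
 *	char buf[2048];
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	recv(fd, buf, sizeof(buf), 0);	// could block if the only queued
 *					// packet fails its checksum
 *
 * first_packet_length() discards bad-checksum packets, so udp_poll()
 * does not report a false-positive POLLIN for them.
 */
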
struct proto udp_prot = {
	.name		   = "UDP",
	.owner		   = THIS_MODULE,
	.close		   = udp_lib_close,
	.connect	   = ip4_datagram_connect,
	.disconnect	   = udp_disconnect,
	.ioctl		   = udp_ioctl,
	.destroy	   = udp_destroy_sock,
	.setsockopt	   = udp_setsockopt,
	.getsockopt	   = udp_getsockopt,
	.sendmsg	   = udp_sendmsg,
	.recvmsg	   = udp_recvmsg,
	.sendpage	   = udp_sendpage,
	.backlog_rcv	   = __udp_queue_rcv_skb,
	.release_cb	   = ip4_datagram_release_cb,
	.hash		   = udp_lib_hash,
	.unhash		   = udp_lib_unhash,
	.rehash		   = udp_v4_rehash,
	.get_port	   = udp_v4_get_port,
	.memory_allocated  = &udp_memory_allocated,
	.sysctl_mem	   = sysctl_udp_mem,
	.sysctl_wmem	   = &sysctl_udp_wmem_min,
	.sysctl_rmem	   = &sysctl_udp_rmem_min,
	.obj_size	   = sizeof(struct udp_sock),
	.slab_flags	   = SLAB_DESTROY_BY_RCU,
	.h.udp_table	   = &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_udp_setsockopt,
	.compat_getsockopt = compat_udp_getsockopt,
#endif
	.clear_sk	   = sk_prot_clear_portaddr_nulls,
};
EXPORT_SYMBOL(udp_prot);

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = start; state->bucket <= state->udp_table->mask;
	     ++state->bucket) {
		struct hlist_nulls_node *node;
		struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];

		if (hlist_nulls_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_nulls_for_each(sk, node, &hslot->head) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (sk->sk_family == state->family)
				goto found;
		}
		spin_unlock_bh(&hslot->lock);
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_nulls_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));

	if (!sk) {
		if (state->bucket <= state->udp_table->mask)
			spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
		return udp_get_first(seq, state->bucket + 1);
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq, 0);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;
	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}

static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}

static void udp_seq_stop(struct seq_file *seq, void *v)
{
	struct udp_iter_state *state = seq->private;

	if (state->bucket <= state->udp_table->mask)
		spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
}

int udp_seq_open(struct inode *inode, struct file *file)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct udp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct udp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->udp_table	= afinfo->udp_table;
	return err;
}
EXPORT_SYMBOL(udp_seq_open);

/* ------------------------------------------------------------------------ */
int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
{
	struct proc_dir_entry *p;
	int rc = 0;

	afinfo->seq_ops.start	= udp_seq_start;
	afinfo->seq_ops.next	= udp_seq_next;
	afinfo->seq_ops.stop	= udp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(udp_proc_register);

void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(udp_proc_unregister);

/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
			     int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src  = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp  = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		sk_rmem_alloc_get(sp),
		0, 0L, 0,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		0, sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops));
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}

static const struct file_operations udp_afinfo_seq_fops = {
	.owner    = THIS_MODULE,
	.open     = udp_seq_open,
	.read     = seq_read,
	.llseek   = seq_lseek,
	.release  = seq_release_net
};

/* ------------------------------------------------------------------------ */
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.name		= "udp",
	.family		= AF_INET,
	.udp_table	= &udp_table,
	.seq_fops	= &udp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= udp4_seq_show,
	},
};

static int __net_init udp4_proc_init_net(struct net *net)
{
	return udp_proc_register(net, &udp4_seq_afinfo);
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
	udp_proc_unregister(net, &udp4_seq_afinfo);
}

static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &uhash_entries);
	if (ret)
		return 0;

	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;
	return 1;
}
__setup("uhash_entries=", set_uhash_entries);

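/* Example (illustrative only): the table size can be forced at boot via
 * the parameter parsed above, e.g. on the kernel command line:
 *
 *	uhash_entries=65536
 *
 * Values below UDP_HTABLE_SIZE_MIN are clamped; when the parameter is
 * absent, alloc_large_system_hash() below sizes the table from available
 * memory, at one slot per 2 MB.
 */
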
void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i;

	table->hash = alloc_large_system_hash(name,
					      2 * sizeof(struct udp_hslot),
					      uhash_entries,
					      21, /* one slot per 2 MB */
					      0,
					      &table->log,
					      &table->mask,
					      UDP_HTABLE_SIZE_MIN,
					      64 * 1024);

	table->hash2 = table->hash + (table->mask + 1);
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_NULLS_HEAD(&table->hash2[i].head, i);
		table->hash2[i].count = 0;
		spin_lock_init(&table->hash2[i].lock);
	}
}

void __init udp_init(void)
{
	unsigned long limit;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	sysctl_udp_wmem_min = SK_MEM_QUANTUM;
}

struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	int mac_len = skb->mac_len;
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	__be16 protocol = skb->protocol;
	netdev_features_t enc_features;
	int outer_hlen;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	skb->encapsulation = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = htons(ETH_P_TEB);

	/* segment inner packet. */
	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
	segs = skb_mac_gso_segment(skb, enc_features);
	if (!segs || IS_ERR(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	outer_hlen = skb_tnl_header_len(skb);
	skb = segs;
	do {
		struct udphdr *uh;
		int udp_offset = outer_hlen - tnl_hlen;

		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;

		skb->mac_len = mac_len;

		skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		uh = udp_hdr(skb);
		uh->len = htons(skb->len - udp_offset);

		/* csum segment if tunnel sets skb with csum. */
		if (protocol == htons(ETH_P_IP) && unlikely(uh->check)) {
			struct iphdr *iph = ip_hdr(skb);

			uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						       skb->len - udp_offset,
						       IPPROTO_UDP, 0);
			uh->check = csum_fold(skb_checksum(skb, udp_offset,
							   skb->len - udp_offset, 0));
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;

		} else if (protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ipv6h = ipv6_hdr(skb);
			u32 len = skb->len - udp_offset;

			uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
						     len, IPPROTO_UDP, 0);
			uh->check = csum_fold(skb_checksum(skb, udp_offset, len, 0));
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
			skb->ip_summed = CHECKSUM_NONE;
		}

		skb->protocol = protocol;
	} while ((skb = skb->next));
out:
	return segs;
}
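
/* Note (illustrative only): each segment's checksum above is the usual
 * UDP pseudo-header sum.  With S() the 16-bit one's-complement sum:
 *
 *	check = ~fold(S(saddr, daddr, len, IPPROTO_UDP) + S(UDP header + payload))
 *
 * csum_tcpudp_magic()/csum_ipv6_magic() contribute the (negated)
 * pseudo-header part, skb_checksum() adds the packet body over it, and
 * csum_fold() reduces the result to 16 bits.  A result of 0 is sent as
 * CSUM_MANGLED_0 (0xffff), since a zero checksum means "no checksum"
 * for UDP over IPv4.
 */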