Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
| 2 | * INET An implementation of the TCP/IP protocol suite for the LINUX |
| 3 | * operating system. INET is implemented using the BSD Socket |
| 4 | * interface as the means of communication with the user level. |
| 5 | * |
| 6 | * The User Datagram Protocol (UDP). |
| 7 | * |
| 8 | * Version: $Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $ |
| 9 | * |
Jesper Juhl | 02c30a8 | 2005-05-05 16:16:16 -0700 | [diff] [blame] | 10 | * Authors: Ross Biro |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
| 12 | * Arnt Gulbrandsen, <agulbra@nvg.unit.no> |
| 13 | * Alan Cox, <Alan.Cox@linux.org> |
| 14 | * Hirokazu Takahashi, <taka@valinux.co.jp> |
| 15 | * |
| 16 | * Fixes: |
| 17 | * Alan Cox : verify_area() calls |
| 18 | * Alan Cox : stopped close while in use off icmp |
| 19 | * messages. Not a fix but a botch that |
| 20 | * for udp at least is 'valid'. |
| 21 | * Alan Cox : Fixed icmp handling properly |
| 22 | * Alan Cox : Correct error for oversized datagrams |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 23 | * Alan Cox : Tidied select() semantics. |
| 24 | * Alan Cox : udp_err() fixed properly, also now |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 25 | * select and read wake correctly on errors |
| 26 | * Alan Cox : udp_send verify_area moved to avoid mem leak |
| 27 | * Alan Cox : UDP can count its memory |
| 28 | * Alan Cox : send to an unknown connection causes |
| 29 | * an ECONNREFUSED off the icmp, but |
| 30 | * does NOT close. |
| 31 | * Alan Cox : Switched to new sk_buff handlers. No more backlog! |
| 32 | * Alan Cox : Using generic datagram code. Even smaller and the PEEK |
| 33 | * bug no longer crashes it. |
| 34 | * Fred Van Kempen : Net2e support for sk->broadcast. |
| 35 | * Alan Cox : Uses skb_free_datagram |
| 36 | * Alan Cox : Added get/set sockopt support. |
| 37 | * Alan Cox : Broadcasting without option set returns EACCES. |
| 38 | * Alan Cox : No wakeup calls. Instead we now use the callbacks. |
| 39 | * Alan Cox : Use ip_tos and ip_ttl |
| 40 | * Alan Cox : SNMP Mibs |
| 41 | * Alan Cox : MSG_DONTROUTE, and 0.0.0.0 support. |
| 42 | * Matt Dillon : UDP length checks. |
| 43 | * Alan Cox : Smarter af_inet used properly. |
| 44 | * Alan Cox : Use new kernel side addressing. |
| 45 | * Alan Cox : Incorrect return on truncated datagram receive. |
| 46 | * Arnt Gulbrandsen : New udp_send and stuff |
| 47 | * Alan Cox : Cache last socket |
| 48 | * Alan Cox : Route cache |
| 49 | * Jon Peatfield : Minor efficiency fix to sendto(). |
| 50 | * Mike Shaver : RFC1122 checks. |
| 51 | * Alan Cox : Nonblocking error fix. |
| 52 | * Willy Konynenberg : Transparent proxying support. |
| 53 | * Mike McLagan : Routing by source |
| 54 | * David S. Miller : New socket lookup architecture. |
| 55 | * Last socket cache retained as it |
| 56 | * does have a high hit rate. |
| 57 | * Olaf Kirch : Don't linearise iovec on sendmsg. |
| 58 | * Andi Kleen : Some cleanups, cache destination entry |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 59 | * for connect. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 60 | * Vitaly E. Lavrov : Transparent proxy revived after year coma. |
| 61 | * Melvin Smith : Check msg_name not msg_namelen in sendto(), |
| 62 | * return ENOTCONN for unconnected sockets (POSIX) |
| 63 | * Janos Farkas : don't deliver multi/broadcasts to a different |
| 64 | * bound-to-device socket |
| 65 | * Hirokazu Takahashi : HW checksumming for outgoing UDP |
| 66 | * datagrams. |
| 67 | * Hirokazu Takahashi : sendfile() on UDP works now. |
| 68 | * Arnaldo C. Melo : convert /proc/net/udp to seq_file |
| 69 | * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which |
| 70 | * Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind |
| 71 | * a single port at the same time. |
| 72 | * Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
James Chapman | 342f023 | 2007-06-27 15:37:46 -0700 | [diff] [blame] | 73 | * James Chapman : Add L2TP encapsulation type. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 74 | * |
| 75 | * |
| 76 | * This program is free software; you can redistribute it and/or |
| 77 | * modify it under the terms of the GNU General Public License |
| 78 | * as published by the Free Software Foundation; either version |
| 79 | * 2 of the License, or (at your option) any later version. |
| 80 | */ |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 81 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 82 | #include <asm/system.h> |
| 83 | #include <asm/uaccess.h> |
| 84 | #include <asm/ioctls.h> |
Hideo Aoki | 95766ff | 2007-12-31 00:29:24 -0800 | [diff] [blame] | 85 | #include <linux/bootmem.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 86 | #include <linux/types.h> |
| 87 | #include <linux/fcntl.h> |
| 88 | #include <linux/module.h> |
| 89 | #include <linux/socket.h> |
| 90 | #include <linux/sockios.h> |
Arnaldo Carvalho de Melo | 14c8502 | 2005-12-27 02:43:12 -0200 | [diff] [blame] | 91 | #include <linux/igmp.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 92 | #include <linux/in.h> |
| 93 | #include <linux/errno.h> |
| 94 | #include <linux/timer.h> |
| 95 | #include <linux/mm.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 96 | #include <linux/inet.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 97 | #include <linux/netdevice.h> |
Arnaldo Carvalho de Melo | c752f07 | 2005-08-09 20:08:28 -0700 | [diff] [blame] | 98 | #include <net/tcp_states.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 99 | #include <linux/skbuff.h> |
| 100 | #include <linux/proc_fs.h> |
| 101 | #include <linux/seq_file.h> |
Eric W. Biederman | 457c4cb | 2007-09-12 12:01:34 +0200 | [diff] [blame] | 102 | #include <net/net_namespace.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 103 | #include <net/icmp.h> |
| 104 | #include <net/route.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 105 | #include <net/checksum.h> |
| 106 | #include <net/xfrm.h> |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 107 | #include "udp_impl.h" |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 108 | |
| 109 | /* |
| 110 | * SNMP MIB for the UDP layer
| 111 | */ |
| 112 | |
Eric Dumazet | ba89966 | 2005-08-26 12:05:31 -0700 | [diff] [blame] | 113 | DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly; |
Herbert Xu | 1781f7f | 2007-12-11 11:30:32 -0800 | [diff] [blame] | 114 | EXPORT_SYMBOL(udp_statistics); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 115 | |
Herbert Xu | 9055e05 | 2007-12-14 11:25:26 -0800 | [diff] [blame] | 116 | DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly; |
| 117 | EXPORT_SYMBOL(udp_stats_in6); |
| 118 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 119 | struct hlist_head udp_hash[UDP_HTABLE_SIZE]; |
| 120 | DEFINE_RWLOCK(udp_hash_lock); |
| 121 | |
Hideo Aoki | 95766ff | 2007-12-31 00:29:24 -0800 | [diff] [blame] | 122 | int sysctl_udp_mem[3] __read_mostly; |
| 123 | int sysctl_udp_rmem_min __read_mostly; |
| 124 | int sysctl_udp_wmem_min __read_mostly; |
| 125 | |
| 126 | EXPORT_SYMBOL(sysctl_udp_mem); |
| 127 | EXPORT_SYMBOL(sysctl_udp_rmem_min); |
| 128 | EXPORT_SYMBOL(sysctl_udp_wmem_min); |
| 129 | |
| 130 | atomic_t udp_memory_allocated; |
| 131 | EXPORT_SYMBOL(udp_memory_allocated); |
| 132 | |
Pavel Emelyanov | fa4d3c6 | 2008-01-31 05:07:57 -0800 | [diff] [blame] | 133 | static inline int __udp_lib_lport_inuse(struct net *net, __u16 num, |
Stephen Hemminger | 32c1da7 | 2007-08-24 23:09:41 -0700 | [diff] [blame] | 134 | const struct hlist_head udptable[]) |
Gerrit Renker | 25030a7 | 2006-08-26 20:06:05 -0700 | [diff] [blame] | 135 | { |
| 136 | struct sock *sk; |
| 137 | struct hlist_node *node; |
| 138 | |
David S. Miller | df2bc45 | 2007-06-05 15:18:43 -0700 | [diff] [blame] | 139 | sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)]) |
YOSHIFUJI Hideaki | 878628f | 2008-03-26 03:57:35 +0900 | [diff] [blame] | 140 | if (net_eq(sock_net(sk), net) && sk->sk_hash == num) |
Gerrit Renker | 25030a7 | 2006-08-26 20:06:05 -0700 | [diff] [blame] | 141 | return 1; |
| 142 | return 0; |
| 143 | } |
| 144 | |
| 145 | /** |
Pavel Emelyanov | 6ba5a3c | 2008-03-22 16:51:21 -0700 | [diff] [blame] | 146 | * udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6 |
Gerrit Renker | 25030a7 | 2006-08-26 20:06:05 -0700 | [diff] [blame] | 147 | * |
| 148 | * @sk: socket struct in question |
| 149 | * @snum: port number to look up |
David S. Miller | df2bc45 | 2007-06-05 15:18:43 -0700 | [diff] [blame] | 150 | * @saddr_comp: AF-dependent comparison of bound local IP addresses |
Gerrit Renker | 25030a7 | 2006-08-26 20:06:05 -0700 | [diff] [blame] | 151 | */ |
Pavel Emelyanov | 6ba5a3c | 2008-03-22 16:51:21 -0700 | [diff] [blame] | 152 | int udp_lib_get_port(struct sock *sk, unsigned short snum, |
David S. Miller | df2bc45 | 2007-06-05 15:18:43 -0700 | [diff] [blame] | 153 | int (*saddr_comp)(const struct sock *sk1, |
| 154 | const struct sock *sk2 ) ) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 155 | { |
Pavel Emelyanov | 6ba5a3c | 2008-03-22 16:51:21 -0700 | [diff] [blame] | 156 | struct hlist_head *udptable = sk->sk_prot->h.udp_hash; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 157 | struct hlist_node *node; |
Gerrit Renker | 25030a7 | 2006-08-26 20:06:05 -0700 | [diff] [blame] | 158 | struct hlist_head *head; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 159 | struct sock *sk2; |
Gerrit Renker | 25030a7 | 2006-08-26 20:06:05 -0700 | [diff] [blame] | 160 | int error = 1; |
YOSHIFUJI Hideaki | 3b1e0a6 | 2008-03-26 02:26:21 +0900 | [diff] [blame] | 161 | struct net *net = sock_net(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 162 | |
| 163 | write_lock_bh(&udp_hash_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 164 | |
Stephen Hemminger | 32c1da7 | 2007-08-24 23:09:41 -0700 | [diff] [blame] | 165 | if (!snum) { |
Anton Arapov | a25de53 | 2007-10-18 22:00:17 -0700 | [diff] [blame] | 166 | int i, low, high, remaining; |
Stephen Hemminger | 32c1da7 | 2007-08-24 23:09:41 -0700 | [diff] [blame] | 167 | unsigned rover, best, best_size_so_far; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 168 | |
Stephen Hemminger | 227b60f | 2007-10-10 17:30:46 -0700 | [diff] [blame] | 169 | inet_get_local_port_range(&low, &high); |
Anton Arapov | a25de53 | 2007-10-18 22:00:17 -0700 | [diff] [blame] | 170 | remaining = (high - low) + 1; |
Stephen Hemminger | 227b60f | 2007-10-10 17:30:46 -0700 | [diff] [blame] | 171 | |
Stephen Hemminger | 32c1da7 | 2007-08-24 23:09:41 -0700 | [diff] [blame] | 172 | best_size_so_far = UINT_MAX; |
Anton Arapov | a25de53 | 2007-10-18 22:00:17 -0700 | [diff] [blame] | 173 | best = rover = net_random() % remaining + low; |
Stephen Hemminger | 32c1da7 | 2007-08-24 23:09:41 -0700 | [diff] [blame] | 174 | |
| 175 | /* 1st pass: look for empty (or shortest) hash chain */ |
| 176 | for (i = 0; i < UDP_HTABLE_SIZE; i++) { |
| 177 | int size = 0; |
| 178 | |
| 179 | head = &udptable[rover & (UDP_HTABLE_SIZE - 1)]; |
| 180 | if (hlist_empty(head)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 181 | goto gotit; |
Stephen Hemminger | 32c1da7 | 2007-08-24 23:09:41 -0700 | [diff] [blame] | 182 | |
David S. Miller | 5c66870 | 2006-12-22 11:42:26 -0800 | [diff] [blame] | 183 | sk_for_each(sk2, node, head) { |
| 184 | if (++size >= best_size_so_far) |
| 185 | goto next; |
| 186 | } |
| 187 | best_size_so_far = size; |
Stephen Hemminger | 32c1da7 | 2007-08-24 23:09:41 -0700 | [diff] [blame] | 188 | best = rover; |
David S. Miller | 5c66870 | 2006-12-22 11:42:26 -0800 | [diff] [blame] | 189 | next: |
Stephen Hemminger | 32c1da7 | 2007-08-24 23:09:41 -0700 | [diff] [blame] | 190 | /* fold back if end of range */ |
| 191 | if (++rover > high) |
| 192 | rover = low + ((rover - low) |
| 193 | & (UDP_HTABLE_SIZE - 1)); |
| 194 | |
| 195 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 196 | } |
Stephen Hemminger | 32c1da7 | 2007-08-24 23:09:41 -0700 | [diff] [blame] | 197 | |
| 198 | /* 2nd pass: find hole in shortest hash chain */ |
| 199 | rover = best; |
| 200 | for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++) { |
Pavel Emelyanov | fa4d3c6 | 2008-01-31 05:07:57 -0800 | [diff] [blame] | 201 | if (! __udp_lib_lport_inuse(net, rover, udptable)) |
Stephen Hemminger | 32c1da7 | 2007-08-24 23:09:41 -0700 | [diff] [blame] | 202 | goto gotit; |
| 203 | rover += UDP_HTABLE_SIZE; |
| 204 | if (rover > high) |
| 205 | rover = low + ((rover - low) |
| 206 | & (UDP_HTABLE_SIZE - 1)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 207 | } |
Stephen Hemminger | 32c1da7 | 2007-08-24 23:09:41 -0700 | [diff] [blame] | 208 | |
| 209 | |
| 210 | /* All ports in use! */ |
| 211 | goto fail; |
| 212 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 213 | gotit: |
Stephen Hemminger | 32c1da7 | 2007-08-24 23:09:41 -0700 | [diff] [blame] | 214 | snum = rover; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 215 | } else { |
David S. Miller | df2bc45 | 2007-06-05 15:18:43 -0700 | [diff] [blame] | 216 | head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 217 | |
Gerrit Renker | 25030a7 | 2006-08-26 20:06:05 -0700 | [diff] [blame] | 218 | sk_for_each(sk2, node, head) |
David S. Miller | df2bc45 | 2007-06-05 15:18:43 -0700 | [diff] [blame] | 219 | if (sk2->sk_hash == snum && |
| 220 | sk2 != sk && |
YOSHIFUJI Hideaki | 878628f | 2008-03-26 03:57:35 +0900 | [diff] [blame] | 221 | net_eq(sock_net(sk2), net) && |
David S. Miller | df2bc45 | 2007-06-05 15:18:43 -0700 | [diff] [blame] | 222 | (!sk2->sk_reuse || !sk->sk_reuse) && |
| 223 | (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if |
| 224 | || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && |
| 225 | (*saddr_comp)(sk, sk2) ) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 226 | goto fail; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 227 | } |
Stephen Hemminger | 32c1da7 | 2007-08-24 23:09:41 -0700 | [diff] [blame] | 228 | |
Gerrit Renker | 25030a7 | 2006-08-26 20:06:05 -0700 | [diff] [blame] | 229 | inet_sk(sk)->num = snum; |
David S. Miller | df2bc45 | 2007-06-05 15:18:43 -0700 | [diff] [blame] | 230 | sk->sk_hash = snum; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 231 | if (sk_unhashed(sk)) { |
David S. Miller | df2bc45 | 2007-06-05 15:18:43 -0700 | [diff] [blame] | 232 | head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; |
Gerrit Renker | 25030a7 | 2006-08-26 20:06:05 -0700 | [diff] [blame] | 233 | sk_add_node(sk, head); |
Pavel Emelyanov | c29a0bc | 2008-03-31 19:41:46 -0700 | [diff] [blame] | 234 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 235 | } |
Gerrit Renker | 25030a7 | 2006-08-26 20:06:05 -0700 | [diff] [blame] | 236 | error = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 237 | fail: |
| 238 | write_unlock_bh(&udp_hash_lock); |
Gerrit Renker | 25030a7 | 2006-08-26 20:06:05 -0700 | [diff] [blame] | 239 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 240 | } |
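/*
 * Illustrative userspace sketch (not part of the original file): binding a
 * UDP socket with sin_port = 0 exercises the ephemeral-port search above,
 * and getsockname() then reports the port that udp_lib_get_port() picked.
 * Assumes <sys/socket.h>, <netinet/in.h> and <string.h>; error handling
 * omitted.
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct sockaddr_in addr;
 *	socklen_t alen = sizeof(addr);
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.sin_family      = AF_INET;
 *	addr.sin_addr.s_addr = htonl(INADDR_ANY);
 *	addr.sin_port        = 0;	/* let the kernel choose a free port */
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	getsockname(fd, (struct sockaddr *)&addr, &alen);
 *	/* ntohs(addr.sin_port) is the port selected by the code above */
 */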
| 241 | |
Pavel Emelyanov | 6ba5a3c | 2008-03-22 16:51:21 -0700 | [diff] [blame] | 242 | static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 243 | { |
| 244 | struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); |
| 245 | |
| 246 | return ( !ipv6_only_sock(sk2) && |
| 247 | (!inet1->rcv_saddr || !inet2->rcv_saddr || |
| 248 | inet1->rcv_saddr == inet2->rcv_saddr )); |
| 249 | } |
| 250 | |
Pavel Emelyanov | 6ba5a3c | 2008-03-22 16:51:21 -0700 | [diff] [blame] | 251 | int udp_v4_get_port(struct sock *sk, unsigned short snum) |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 252 | { |
Pavel Emelyanov | 6ba5a3c | 2008-03-22 16:51:21 -0700 | [diff] [blame] | 253 | return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 254 | } |
| 255 | |
| 256 | /* UDP is nearly always wildcards out the wazoo, it makes no sense to try |
| 257 | * harder than this. -DaveM |
| 258 | */ |
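/*
 * Best-match scoring used below: an AF_INET socket starts at 1, and each
 * attribute the socket has specified (bound local address, connected
 * remote address, connected remote port, bound device) must match the
 * packet exactly and adds 2.  A fully specified socket therefore scores
 * 9 and ends the search early; otherwise the highest-scoring, i.e. most
 * specific, socket wins.
 */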
| 259 | static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, |
| 260 | __be16 sport, __be32 daddr, __be16 dport, |
| 261 | int dif, struct hlist_head udptable[]) |
| 262 | { |
| 263 | struct sock *sk, *result = NULL; |
| 264 | struct hlist_node *node; |
| 265 | unsigned short hnum = ntohs(dport); |
| 266 | int badness = -1; |
| 267 | |
| 268 | read_lock(&udp_hash_lock); |
| 269 | sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) { |
| 270 | struct inet_sock *inet = inet_sk(sk); |
| 271 | |
YOSHIFUJI Hideaki | 878628f | 2008-03-26 03:57:35 +0900 | [diff] [blame] | 272 | if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 273 | !ipv6_only_sock(sk)) { |
| 274 | int score = (sk->sk_family == PF_INET ? 1 : 0); |
| 275 | if (inet->rcv_saddr) { |
| 276 | if (inet->rcv_saddr != daddr) |
| 277 | continue; |
| 278 | score+=2; |
| 279 | } |
| 280 | if (inet->daddr) { |
| 281 | if (inet->daddr != saddr) |
| 282 | continue; |
| 283 | score+=2; |
| 284 | } |
| 285 | if (inet->dport) { |
| 286 | if (inet->dport != sport) |
| 287 | continue; |
| 288 | score+=2; |
| 289 | } |
| 290 | if (sk->sk_bound_dev_if) { |
| 291 | if (sk->sk_bound_dev_if != dif) |
| 292 | continue; |
| 293 | score+=2; |
| 294 | } |
| 295 | if (score == 9) { |
| 296 | result = sk; |
| 297 | break; |
| 298 | } else if (score > badness) { |
| 299 | result = sk; |
| 300 | badness = score; |
| 301 | } |
| 302 | } |
| 303 | } |
| 304 | if (result) |
| 305 | sock_hold(result); |
| 306 | read_unlock(&udp_hash_lock); |
| 307 | return result; |
| 308 | } |
| 309 | |
| 310 | static inline struct sock *udp_v4_mcast_next(struct sock *sk, |
| 311 | __be16 loc_port, __be32 loc_addr, |
| 312 | __be16 rmt_port, __be32 rmt_addr, |
| 313 | int dif) |
| 314 | { |
| 315 | struct hlist_node *node; |
| 316 | struct sock *s = sk; |
| 317 | unsigned short hnum = ntohs(loc_port); |
| 318 | |
| 319 | sk_for_each_from(s, node) { |
| 320 | struct inet_sock *inet = inet_sk(s); |
| 321 | |
| 322 | if (s->sk_hash != hnum || |
| 323 | (inet->daddr && inet->daddr != rmt_addr) || |
| 324 | (inet->dport != rmt_port && inet->dport) || |
| 325 | (inet->rcv_saddr && inet->rcv_saddr != loc_addr) || |
| 326 | ipv6_only_sock(s) || |
| 327 | (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)) |
| 328 | continue; |
| 329 | if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif)) |
| 330 | continue; |
| 331 | goto found; |
| 332 | } |
| 333 | s = NULL; |
| 334 | found: |
| 335 | return s; |
| 336 | } |
| 337 | |
| 338 | /* |
| 339 | * This routine is called by the ICMP module when it gets some |
| 340 | * sort of error condition. If err < 0 then the socket should |
| 341 | * be closed and the error returned to the user. If err > 0 |
| 342 | * it's just the icmp type << 8 | icmp code. |
| 343 | * Header points to the ip header of the error packet. We move |
| 344 | * on past this. Then (as it used to claim before adjustment) |
| 345 | * header points to the first 8 bytes of the udp header. We need |
| 346 | * to find the appropriate port. |
| 347 | */ |
| 348 | |
| 349 | void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[]) |
| 350 | { |
| 351 | struct inet_sock *inet; |
| 352 | struct iphdr *iph = (struct iphdr*)skb->data; |
| 353 | struct udphdr *uh = (struct udphdr*)(skb->data+(iph->ihl<<2)); |
| 354 | const int type = icmp_hdr(skb)->type; |
| 355 | const int code = icmp_hdr(skb)->code; |
| 356 | struct sock *sk; |
| 357 | int harderr; |
| 358 | int err; |
| 359 | |
YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 360 | sk = __udp4_lib_lookup(dev_net(skb->dev), iph->daddr, uh->dest, |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 361 | iph->saddr, uh->source, skb->dev->ifindex, udptable); |
| 362 | if (sk == NULL) { |
| 363 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); |
| 364 | return; /* No socket for error */ |
| 365 | } |
| 366 | |
| 367 | err = 0; |
| 368 | harderr = 0; |
| 369 | inet = inet_sk(sk); |
| 370 | |
| 371 | switch (type) { |
| 372 | default: |
| 373 | case ICMP_TIME_EXCEEDED: |
| 374 | err = EHOSTUNREACH; |
| 375 | break; |
| 376 | case ICMP_SOURCE_QUENCH: |
| 377 | goto out; |
| 378 | case ICMP_PARAMETERPROB: |
| 379 | err = EPROTO; |
| 380 | harderr = 1; |
| 381 | break; |
| 382 | case ICMP_DEST_UNREACH: |
| 383 | if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ |
| 384 | if (inet->pmtudisc != IP_PMTUDISC_DONT) { |
| 385 | err = EMSGSIZE; |
| 386 | harderr = 1; |
| 387 | break; |
| 388 | } |
| 389 | goto out; |
| 390 | } |
| 391 | err = EHOSTUNREACH; |
| 392 | if (code <= NR_ICMP_UNREACH) { |
| 393 | harderr = icmp_err_convert[code].fatal; |
| 394 | err = icmp_err_convert[code].errno; |
| 395 | } |
| 396 | break; |
| 397 | } |
| 398 | |
| 399 | /* |
| 400 | * RFC1122: OK. Passes ICMP errors back to application, as per |
| 401 | * 4.1.3.3. |
| 402 | */ |
| 403 | if (!inet->recverr) { |
| 404 | if (!harderr || sk->sk_state != TCP_ESTABLISHED) |
| 405 | goto out; |
| 406 | } else { |
| 407 | ip_icmp_error(sk, skb, err, uh->dest, info, (u8*)(uh+1)); |
| 408 | } |
| 409 | sk->sk_err = err; |
| 410 | sk->sk_error_report(sk); |
| 411 | out: |
| 412 | sock_put(sk); |
| 413 | } |
| 414 | |
| 415 | void udp_err(struct sk_buff *skb, u32 info) |
| 416 | { |
| 417 | __udp4_lib_err(skb, info, udp_hash); |
| 418 | } |
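/*
 * Illustrative userspace sketch (not part of the original file): with
 * IP_RECVERR enabled, the errors that ip_icmp_error() queues above can be
 * read from the socket's error queue.  Assumes a datagram socket fd plus
 * <sys/socket.h>, <netinet/in.h> and <linux/errqueue.h>; error handling
 * omitted.
 *
 *	int on = 1;
 *	char cbuf[512];
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg;
 *
 *	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0)
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *		     cmsg = CMSG_NXTHDR(&msg, cmsg))
 *			if (cmsg->cmsg_level == IPPROTO_IP &&
 *			    cmsg->cmsg_type == IP_RECVERR) {
 *				struct sock_extended_err *ee = (void *)CMSG_DATA(cmsg);
 *				/* ee->ee_errno matches the err value set above */
 *			}
 */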
| 419 | |
| 420 | /* |
| 421 | * Throw away all pending data and cancel the corking. Socket is locked. |
| 422 | */ |
| 423 | static void udp_flush_pending_frames(struct sock *sk) |
| 424 | { |
| 425 | struct udp_sock *up = udp_sk(sk); |
| 426 | |
| 427 | if (up->pending) { |
| 428 | up->len = 0; |
| 429 | up->pending = 0; |
| 430 | ip_flush_pending_frames(sk); |
| 431 | } |
| 432 | } |
| 433 | |
| 434 | /** |
| 435 | * udp4_hwcsum_outgoing - handle outgoing HW checksumming |
| 436 | * @sk: socket we are sending on |
| 437 | * @skb: sk_buff containing the filled-in UDP header |
| 438 | * (checksum field must be zeroed out) |
| 439 | */ |
| 440 | static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, |
| 441 | __be32 src, __be32 dst, int len ) |
| 442 | { |
| 443 | unsigned int offset; |
| 444 | struct udphdr *uh = udp_hdr(skb); |
| 445 | __wsum csum = 0; |
| 446 | |
| 447 | if (skb_queue_len(&sk->sk_write_queue) == 1) { |
| 448 | /* |
| 449 | * Only one fragment on the socket. |
| 450 | */ |
| 451 | skb->csum_start = skb_transport_header(skb) - skb->head; |
| 452 | skb->csum_offset = offsetof(struct udphdr, check); |
| 453 | uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0); |
| 454 | } else { |
| 455 | /* |
| 456 | * HW checksumming won't work here, as there are two or more
| 457 | * fragments on the socket, so the csums of all the sk_buffs
| 458 | * have to be folded together
| 459 | */ |
| 460 | offset = skb_transport_offset(skb); |
| 461 | skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); |
| 462 | |
| 463 | skb->ip_summed = CHECKSUM_NONE; |
| 464 | |
| 465 | skb_queue_walk(&sk->sk_write_queue, skb) { |
| 466 | csum = csum_add(csum, skb->csum); |
| 467 | } |
| 468 | |
| 469 | uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum); |
| 470 | if (uh->check == 0) |
| 471 | uh->check = CSUM_MANGLED_0; |
| 472 | } |
| 473 | } |
| 474 | |
| 475 | /* |
| 476 | * Push out all pending data as one UDP datagram. Socket is locked. |
| 477 | */ |
| 478 | static int udp_push_pending_frames(struct sock *sk) |
| 479 | { |
| 480 | struct udp_sock *up = udp_sk(sk); |
| 481 | struct inet_sock *inet = inet_sk(sk); |
| 482 | struct flowi *fl = &inet->cork.fl; |
| 483 | struct sk_buff *skb; |
| 484 | struct udphdr *uh; |
| 485 | int err = 0; |
| 486 | int is_udplite = IS_UDPLITE(sk); |
| 487 | __wsum csum = 0; |
| 488 | |
| 489 | /* Grab the skbuff where UDP header space exists. */ |
| 490 | if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) |
| 491 | goto out; |
| 492 | |
| 493 | /* |
| 494 | * Create a UDP header |
| 495 | */ |
| 496 | uh = udp_hdr(skb); |
| 497 | uh->source = fl->fl_ip_sport; |
| 498 | uh->dest = fl->fl_ip_dport; |
| 499 | uh->len = htons(up->len); |
| 500 | uh->check = 0; |
| 501 | |
| 502 | if (is_udplite) /* UDP-Lite */ |
| 503 | csum = udplite_csum_outgoing(sk, skb); |
| 504 | |
| 505 | else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */ |
| 506 | |
| 507 | skb->ip_summed = CHECKSUM_NONE; |
| 508 | goto send; |
| 509 | |
| 510 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ |
| 511 | |
| 512 | udp4_hwcsum_outgoing(sk, skb, fl->fl4_src,fl->fl4_dst, up->len); |
| 513 | goto send; |
| 514 | |
| 515 | } else /* `normal' UDP */ |
| 516 | csum = udp_csum_outgoing(sk, skb); |
| 517 | |
| 518 | /* add protocol-dependent pseudo-header */ |
| 519 | uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len, |
| 520 | sk->sk_protocol, csum ); |
| 521 | if (uh->check == 0) |
| 522 | uh->check = CSUM_MANGLED_0; |
| 523 | |
| 524 | send: |
| 525 | err = ip_push_pending_frames(sk); |
| 526 | out: |
| 527 | up->len = 0; |
| 528 | up->pending = 0; |
| 529 | if (!err) |
| 530 | UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite); |
| 531 | return err; |
| 532 | } |
| 533 | |
| 534 | int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, |
| 535 | size_t len) |
| 536 | { |
| 537 | struct inet_sock *inet = inet_sk(sk); |
| 538 | struct udp_sock *up = udp_sk(sk); |
| 539 | int ulen = len; |
| 540 | struct ipcm_cookie ipc; |
| 541 | struct rtable *rt = NULL; |
| 542 | int free = 0; |
| 543 | int connected = 0; |
| 544 | __be32 daddr, faddr, saddr; |
| 545 | __be16 dport; |
| 546 | u8 tos; |
| 547 | int err, is_udplite = IS_UDPLITE(sk); |
| 548 | int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; |
| 549 | int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); |
| 550 | |
| 551 | if (len > 0xFFFF) |
| 552 | return -EMSGSIZE; |
| 553 | |
| 554 | /* |
| 555 | * Check the flags. |
| 556 | */ |
| 557 | |
| 558 | if (msg->msg_flags&MSG_OOB) /* Mirror BSD error message compatibility */ |
| 559 | return -EOPNOTSUPP; |
| 560 | |
| 561 | ipc.opt = NULL; |
| 562 | |
| 563 | if (up->pending) { |
| 564 | /* |
| 565 | * There are pending frames. |
| 566 | * The socket lock must be held while it's corked. |
| 567 | */ |
| 568 | lock_sock(sk); |
| 569 | if (likely(up->pending)) { |
| 570 | if (unlikely(up->pending != AF_INET)) { |
| 571 | release_sock(sk); |
| 572 | return -EINVAL; |
| 573 | } |
| 574 | goto do_append_data; |
| 575 | } |
| 576 | release_sock(sk); |
| 577 | } |
| 578 | ulen += sizeof(struct udphdr); |
| 579 | |
| 580 | /* |
| 581 | * Get and verify the address. |
| 582 | */ |
| 583 | if (msg->msg_name) { |
| 584 | struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name; |
| 585 | if (msg->msg_namelen < sizeof(*usin)) |
| 586 | return -EINVAL; |
| 587 | if (usin->sin_family != AF_INET) { |
| 588 | if (usin->sin_family != AF_UNSPEC) |
| 589 | return -EAFNOSUPPORT; |
| 590 | } |
| 591 | |
| 592 | daddr = usin->sin_addr.s_addr; |
| 593 | dport = usin->sin_port; |
| 594 | if (dport == 0) |
| 595 | return -EINVAL; |
| 596 | } else { |
| 597 | if (sk->sk_state != TCP_ESTABLISHED) |
| 598 | return -EDESTADDRREQ; |
| 599 | daddr = inet->daddr; |
| 600 | dport = inet->dport; |
| 601 | /* Open fast path for connected socket. |
| 602 | Route will not be used, if at least one option is set. |
| 603 | */ |
| 604 | connected = 1; |
| 605 | } |
| 606 | ipc.addr = inet->saddr; |
| 607 | |
| 608 | ipc.oif = sk->sk_bound_dev_if; |
| 609 | if (msg->msg_controllen) { |
YOSHIFUJI Hideaki | 3b1e0a6 | 2008-03-26 02:26:21 +0900 | [diff] [blame] | 610 | err = ip_cmsg_send(sock_net(sk), msg, &ipc); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 611 | if (err) |
| 612 | return err; |
| 613 | if (ipc.opt) |
| 614 | free = 1; |
| 615 | connected = 0; |
| 616 | } |
| 617 | if (!ipc.opt) |
| 618 | ipc.opt = inet->opt; |
| 619 | |
| 620 | saddr = ipc.addr; |
| 621 | ipc.addr = faddr = daddr; |
| 622 | |
| 623 | if (ipc.opt && ipc.opt->srr) { |
| 624 | if (!daddr) |
| 625 | return -EINVAL; |
| 626 | faddr = ipc.opt->faddr; |
| 627 | connected = 0; |
| 628 | } |
| 629 | tos = RT_TOS(inet->tos); |
| 630 | if (sock_flag(sk, SOCK_LOCALROUTE) || |
| 631 | (msg->msg_flags & MSG_DONTROUTE) || |
| 632 | (ipc.opt && ipc.opt->is_strictroute)) { |
| 633 | tos |= RTO_ONLINK; |
| 634 | connected = 0; |
| 635 | } |
| 636 | |
| 637 | if (ipv4_is_multicast(daddr)) { |
| 638 | if (!ipc.oif) |
| 639 | ipc.oif = inet->mc_index; |
| 640 | if (!saddr) |
| 641 | saddr = inet->mc_addr; |
| 642 | connected = 0; |
| 643 | } |
| 644 | |
| 645 | if (connected) |
| 646 | rt = (struct rtable*)sk_dst_check(sk, 0); |
| 647 | |
| 648 | if (rt == NULL) { |
| 649 | struct flowi fl = { .oif = ipc.oif, |
| 650 | .nl_u = { .ip4_u = |
| 651 | { .daddr = faddr, |
| 652 | .saddr = saddr, |
| 653 | .tos = tos } }, |
| 654 | .proto = sk->sk_protocol, |
| 655 | .uli_u = { .ports = |
| 656 | { .sport = inet->sport, |
| 657 | .dport = dport } } }; |
| 658 | security_sk_classify_flow(sk, &fl); |
YOSHIFUJI Hideaki | 3b1e0a6 | 2008-03-26 02:26:21 +0900 | [diff] [blame] | 659 | err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 1); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 660 | if (err) { |
| 661 | if (err == -ENETUNREACH) |
| 662 | IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); |
| 663 | goto out; |
| 664 | } |
| 665 | |
| 666 | err = -EACCES; |
| 667 | if ((rt->rt_flags & RTCF_BROADCAST) && |
| 668 | !sock_flag(sk, SOCK_BROADCAST)) |
| 669 | goto out; |
| 670 | if (connected) |
| 671 | sk_dst_set(sk, dst_clone(&rt->u.dst)); |
| 672 | } |
| 673 | |
| 674 | if (msg->msg_flags&MSG_CONFIRM) |
| 675 | goto do_confirm; |
| 676 | back_from_confirm: |
| 677 | |
| 678 | saddr = rt->rt_src; |
| 679 | if (!ipc.addr) |
| 680 | daddr = ipc.addr = rt->rt_dst; |
| 681 | |
| 682 | lock_sock(sk); |
| 683 | if (unlikely(up->pending)) { |
| 684 | /* The socket is already corked while preparing it. */ |
| 685 | /* ... which is an evident application bug. --ANK */ |
| 686 | release_sock(sk); |
| 687 | |
| 688 | LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n"); |
| 689 | err = -EINVAL; |
| 690 | goto out; |
| 691 | } |
| 692 | /* |
| 693 | * Now cork the socket to pend data. |
| 694 | */ |
| 695 | inet->cork.fl.fl4_dst = daddr; |
| 696 | inet->cork.fl.fl_ip_dport = dport; |
| 697 | inet->cork.fl.fl4_src = saddr; |
| 698 | inet->cork.fl.fl_ip_sport = inet->sport; |
| 699 | up->pending = AF_INET; |
| 700 | |
| 701 | do_append_data: |
| 702 | up->len += ulen; |
| 703 | getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; |
| 704 | err = ip_append_data(sk, getfrag, msg->msg_iov, ulen, |
| 705 | sizeof(struct udphdr), &ipc, rt, |
| 706 | corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); |
| 707 | if (err) |
| 708 | udp_flush_pending_frames(sk); |
| 709 | else if (!corkreq) |
| 710 | err = udp_push_pending_frames(sk); |
| 711 | else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) |
| 712 | up->pending = 0; |
| 713 | release_sock(sk); |
| 714 | |
| 715 | out: |
| 716 | ip_rt_put(rt); |
| 717 | if (free) |
| 718 | kfree(ipc.opt); |
| 719 | if (!err) |
| 720 | return len; |
| 721 | /* |
| 722 | * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting |
| 723 | * ENOBUFS might not be good (it's not tunable per se), but otherwise |
| 724 | * we don't have a good statistic (IpOutDiscards but it can be too many |
| 725 | * things). We could add another new stat but at least for now that |
| 726 | * seems like overkill. |
| 727 | */ |
| 728 | if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { |
| 729 | UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite); |
| 730 | } |
| 731 | return err; |
| 732 | |
| 733 | do_confirm: |
| 734 | dst_confirm(&rt->u.dst); |
| 735 | if (!(msg->msg_flags&MSG_PROBE) || len) |
| 736 | goto back_from_confirm; |
| 737 | err = 0; |
| 738 | goto out; |
| 739 | } |
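/*
 * Illustrative userspace sketch (not part of the original file): the
 * corking logic above (up->pending together with UDP_CORK or MSG_MORE)
 * lets several send calls on a connected socket be coalesced into one
 * datagram.  fd is assumed to be a connected UDP socket and the buffer
 * names are placeholders; error handling omitted.
 *
 *	send(fd, header, header_len, MSG_MORE);	/* appended, not yet sent */
 *	send(fd, payload, payload_len, 0);	/* pushes a single datagram */
 */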
| 740 | |
| 741 | int udp_sendpage(struct sock *sk, struct page *page, int offset, |
| 742 | size_t size, int flags) |
| 743 | { |
| 744 | struct udp_sock *up = udp_sk(sk); |
| 745 | int ret; |
| 746 | |
| 747 | if (!up->pending) { |
| 748 | struct msghdr msg = { .msg_flags = flags|MSG_MORE }; |
| 749 | |
| 750 | /* Call udp_sendmsg to specify destination address which |
| 751 | * sendpage interface can't pass. |
| 752 | * This will succeed only when the socket is connected. |
| 753 | */ |
| 754 | ret = udp_sendmsg(NULL, sk, &msg, 0); |
| 755 | if (ret < 0) |
| 756 | return ret; |
| 757 | } |
| 758 | |
| 759 | lock_sock(sk); |
| 760 | |
| 761 | if (unlikely(!up->pending)) { |
| 762 | release_sock(sk); |
| 763 | |
| 764 | LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n"); |
| 765 | return -EINVAL; |
| 766 | } |
| 767 | |
| 768 | ret = ip_append_page(sk, page, offset, size, flags); |
| 769 | if (ret == -EOPNOTSUPP) { |
| 770 | release_sock(sk); |
| 771 | return sock_no_sendpage(sk->sk_socket, page, offset, |
| 772 | size, flags); |
| 773 | } |
| 774 | if (ret < 0) { |
| 775 | udp_flush_pending_frames(sk); |
| 776 | goto out; |
| 777 | } |
| 778 | |
| 779 | up->len += size; |
| 780 | if (!(up->corkflag || (flags&MSG_MORE))) |
| 781 | ret = udp_push_pending_frames(sk); |
| 782 | if (!ret) |
| 783 | ret = size; |
| 784 | out: |
| 785 | release_sock(sk); |
| 786 | return ret; |
| 787 | } |
| 788 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 789 | /* |
| 790 | * IOCTL requests applicable to the UDP protocol |
| 791 | */ |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 792 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 793 | int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) |
| 794 | { |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 795 | switch (cmd) { |
| 796 | case SIOCOUTQ: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 797 | { |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 798 | int amount = atomic_read(&sk->sk_wmem_alloc); |
| 799 | return put_user(amount, (int __user *)arg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 800 | } |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 801 | |
| 802 | case SIOCINQ: |
| 803 | { |
| 804 | struct sk_buff *skb; |
| 805 | unsigned long amount; |
| 806 | |
| 807 | amount = 0; |
| 808 | spin_lock_bh(&sk->sk_receive_queue.lock); |
| 809 | skb = skb_peek(&sk->sk_receive_queue); |
| 810 | if (skb != NULL) { |
| 811 | /* |
| 812 | * We will only return the amount |
| 813 | * of this packet since that is all |
| 814 | * that will be read. |
| 815 | */ |
| 816 | amount = skb->len - sizeof(struct udphdr); |
| 817 | } |
| 818 | spin_unlock_bh(&sk->sk_receive_queue.lock); |
| 819 | return put_user(amount, (int __user *)arg); |
| 820 | } |
| 821 | |
| 822 | default: |
| 823 | return -ENOIOCTLCMD; |
| 824 | } |
| 825 | |
| 826 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 827 | } |
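/*
 * Illustrative userspace sketch (not part of the original file): querying
 * the two ioctls handled above.  SIOCINQ reports the payload length of
 * the next queued datagram, SIOCOUTQ the bytes still sitting in the send
 * queue.  Assumes <sys/ioctl.h> and <linux/sockios.h>; error handling
 * omitted.
 *
 *	int rx_bytes, tx_bytes;
 *
 *	ioctl(fd, SIOCINQ, &rx_bytes);
 *	ioctl(fd, SIOCOUTQ, &tx_bytes);
 */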
| 828 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 829 | /* |
| 830 | * This should be easy: if there is something there, we
| 831 | * return it; otherwise we block.
| 832 | */ |
| 833 | |
| 834 | int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, |
| 835 | size_t len, int noblock, int flags, int *addr_len) |
| 836 | { |
| 837 | struct inet_sock *inet = inet_sk(sk); |
| 838 | struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; |
| 839 | struct sk_buff *skb; |
| 840 | unsigned int ulen, copied; |
| 841 | int peeked; |
| 842 | int err; |
| 843 | int is_udplite = IS_UDPLITE(sk); |
| 844 | |
| 845 | /* |
| 846 | * Check any passed addresses |
| 847 | */ |
| 848 | if (addr_len) |
| 849 | *addr_len=sizeof(*sin); |
| 850 | |
| 851 | if (flags & MSG_ERRQUEUE) |
| 852 | return ip_recv_error(sk, msg, len); |
| 853 | |
| 854 | try_again: |
| 855 | skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), |
| 856 | &peeked, &err); |
| 857 | if (!skb) |
| 858 | goto out; |
| 859 | |
| 860 | ulen = skb->len - sizeof(struct udphdr); |
| 861 | copied = len; |
| 862 | if (copied > ulen) |
| 863 | copied = ulen; |
| 864 | else if (copied < ulen) |
| 865 | msg->msg_flags |= MSG_TRUNC; |
| 866 | |
| 867 | /* |
| 868 | * If checksum is needed at all, try to do it while copying the |
| 869 | * data. If the data is truncated, or if we only want a partial |
| 870 | * coverage checksum (UDP-Lite), do it before the copy. |
| 871 | */ |
| 872 | |
| 873 | if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { |
| 874 | if (udp_lib_checksum_complete(skb)) |
| 875 | goto csum_copy_err; |
| 876 | } |
| 877 | |
| 878 | if (skb_csum_unnecessary(skb)) |
| 879 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), |
| 880 | msg->msg_iov, copied ); |
| 881 | else { |
| 882 | err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); |
| 883 | |
| 884 | if (err == -EINVAL) |
| 885 | goto csum_copy_err; |
| 886 | } |
| 887 | |
| 888 | if (err) |
| 889 | goto out_free; |
| 890 | |
| 891 | if (!peeked) |
| 892 | UDP_INC_STATS_USER(UDP_MIB_INDATAGRAMS, is_udplite); |
| 893 | |
| 894 | sock_recv_timestamp(msg, sk, skb); |
| 895 | |
| 896 | /* Copy the address. */ |
| 897 | if (sin) |
| 898 | { |
| 899 | sin->sin_family = AF_INET; |
| 900 | sin->sin_port = udp_hdr(skb)->source; |
| 901 | sin->sin_addr.s_addr = ip_hdr(skb)->saddr; |
| 902 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); |
| 903 | } |
| 904 | if (inet->cmsg_flags) |
| 905 | ip_cmsg_recv(msg, skb); |
| 906 | |
| 907 | err = copied; |
| 908 | if (flags & MSG_TRUNC) |
| 909 | err = ulen; |
| 910 | |
| 911 | out_free: |
| 912 | lock_sock(sk); |
| 913 | skb_free_datagram(sk, skb); |
| 914 | release_sock(sk); |
| 915 | out: |
| 916 | return err; |
| 917 | |
| 918 | csum_copy_err: |
| 919 | lock_sock(sk); |
| 920 | if (!skb_kill_datagram(sk, skb, flags)) |
| 921 | UDP_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite); |
| 922 | release_sock(sk); |
| 923 | |
| 924 | if (noblock) |
| 925 | return -EAGAIN; |
| 926 | goto try_again; |
| 927 | } |
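/*
 * Illustrative userspace sketch (not part of the original file): because
 * the code above returns the full datagram length when MSG_TRUNC is set,
 * the size of the next datagram can be learned without consuming it.
 * Error handling omitted.
 *
 *	char byte;
 *	ssize_t full_len = recv(fd, &byte, 1, MSG_PEEK | MSG_TRUNC);
 *	/* full_len is the complete payload length of the queued datagram */
 */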
| 928 | |
| 929 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 930 | int udp_disconnect(struct sock *sk, int flags) |
| 931 | { |
| 932 | struct inet_sock *inet = inet_sk(sk); |
| 933 | /* |
| 934 | * 1003.1g - break association. |
| 935 | */ |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 936 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 937 | sk->sk_state = TCP_CLOSE; |
| 938 | inet->daddr = 0; |
| 939 | inet->dport = 0; |
| 940 | sk->sk_bound_dev_if = 0; |
| 941 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) |
| 942 | inet_reset_saddr(sk); |
| 943 | |
| 944 | if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { |
| 945 | sk->sk_prot->unhash(sk); |
| 946 | inet->sport = 0; |
| 947 | } |
| 948 | sk_dst_reset(sk); |
| 949 | return 0; |
| 950 | } |
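/*
 * Illustrative userspace sketch (not part of the original file): this is
 * the path taken when a UDP socket is connect()ed to an address with
 * sa_family = AF_UNSPEC, which breaks the association as per 1003.1g.
 * Error handling omitted.
 *
 *	struct sockaddr unspec = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &unspec, sizeof(unspec));	/* socket is unconnected again */
 */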
| 951 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 952 | /* returns: |
| 953 | * -1: error |
| 954 | * 0: success |
| 955 | * >0: "udp encap" protocol resubmission |
| 956 | * |
| 957 | * Note that in the success and error cases, the skb is assumed to |
| 958 | * have either been requeued or freed. |
| 959 | */ |
| 960 | int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) |
| 961 | { |
| 962 | struct udp_sock *up = udp_sk(sk); |
| 963 | int rc; |
| 964 | int is_udplite = IS_UDPLITE(sk); |
| 965 | |
| 966 | /* |
| 967 | * Charge it to the socket, dropping if the queue is full. |
| 968 | */ |
| 969 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) |
| 970 | goto drop; |
| 971 | nf_reset(skb); |
| 972 | |
| 973 | if (up->encap_type) { |
| 974 | /* |
| 975 | * This is an encapsulation socket so pass the skb to |
| 976 | * the socket's udp_encap_rcv() hook. Otherwise, just |
| 977 | * fall through and pass this up the UDP socket. |
| 978 | * up->encap_rcv() returns the following value: |
| 979 | * =0 if skb was successfully passed to the encap |
| 980 | * handler or was discarded by it. |
| 981 | * >0 if skb should be passed on to UDP. |
| 982 | * <0 if skb should be resubmitted as proto -N |
| 983 | */ |
| 984 | |
| 985 | /* if we're overly short, let UDP handle it */ |
| 986 | if (skb->len > sizeof(struct udphdr) && |
| 987 | up->encap_rcv != NULL) { |
| 988 | int ret; |
| 989 | |
| 990 | ret = (*up->encap_rcv)(sk, skb); |
| 991 | if (ret <= 0) { |
| 992 | UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, |
| 993 | is_udplite); |
| 994 | return -ret; |
| 995 | } |
| 996 | } |
| 997 | |
| 998 | /* FALLTHROUGH -- it's a UDP Packet */ |
| 999 | } |
| 1000 | |
| 1001 | /* |
| 1002 | * UDP-Lite specific tests, ignored on UDP sockets |
| 1003 | */ |
| 1004 | if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { |
| 1005 | |
| 1006 | /* |
| 1007 | * MIB statistics other than incrementing the error count are |
| 1008 | * disabled for the following two types of errors: these depend |
| 1009 | * on the application settings, not on the functioning of the |
| 1010 | * protocol stack as such. |
| 1011 | * |
| 1012 | * RFC 3828 here recommends (sec 3.3): "There should also be a |
| 1013 | * way ... to ... at least let the receiving application block |
| 1014 | * delivery of packets with coverage values less than a value |
| 1015 | * provided by the application." |
| 1016 | */ |
| 1017 | if (up->pcrlen == 0) { /* full coverage was set */ |
| 1018 | LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage " |
| 1019 | "%d while full coverage %d requested\n", |
| 1020 | UDP_SKB_CB(skb)->cscov, skb->len); |
| 1021 | goto drop; |
| 1022 | } |
| 1023 | /* The next case involves violating the min. coverage requested |
| 1024 | * by the receiver. This is subtle: if receiver wants x and x is |
| 1025 | * greater than the buffersize/MTU then receiver will complain |
| 1026 | * that it wants x while sender emits packets of smaller size y. |
| 1027 | * Therefore the above ...()->partial_cov statement is essential. |
| 1028 | */ |
| 1029 | if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { |
| 1030 | LIMIT_NETDEBUG(KERN_WARNING |
| 1031 | "UDPLITE: coverage %d too small, need min %d\n", |
| 1032 | UDP_SKB_CB(skb)->cscov, up->pcrlen); |
| 1033 | goto drop; |
| 1034 | } |
| 1035 | } |
| 1036 | |
| 1037 | if (sk->sk_filter) { |
| 1038 | if (udp_lib_checksum_complete(skb)) |
| 1039 | goto drop; |
| 1040 | } |
| 1041 | |
| 1042 | if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { |
| 1043 | /* Note that an ENOMEM error is charged twice */ |
| 1044 | if (rc == -ENOMEM) |
| 1045 | UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite); |
| 1046 | goto drop; |
| 1047 | } |
| 1048 | |
| 1049 | return 0; |
| 1050 | |
| 1051 | drop: |
| 1052 | UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); |
| 1053 | kfree_skb(skb); |
| 1054 | return -1; |
| 1055 | } |
| 1056 | |
| 1057 | /* |
| 1058 | * Multicasts and broadcasts go to each listener. |
| 1059 | * |
| 1060 | * Note: called only from the BH handler context, |
| 1061 | * so we don't need to lock the hashes. |
| 1062 | */ |
| 1063 | static int __udp4_lib_mcast_deliver(struct sk_buff *skb, |
| 1064 | struct udphdr *uh, |
| 1065 | __be32 saddr, __be32 daddr, |
| 1066 | struct hlist_head udptable[]) |
| 1067 | { |
| 1068 | struct sock *sk; |
| 1069 | int dif; |
| 1070 | |
| 1071 | read_lock(&udp_hash_lock); |
| 1072 | sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]); |
| 1073 | dif = skb->dev->ifindex; |
| 1074 | sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); |
| 1075 | if (sk) { |
| 1076 | struct sock *sknext = NULL; |
| 1077 | |
| 1078 | do { |
| 1079 | struct sk_buff *skb1 = skb; |
| 1080 | |
| 1081 | sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr, |
| 1082 | uh->source, saddr, dif); |
| 1083 | if (sknext) |
| 1084 | skb1 = skb_clone(skb, GFP_ATOMIC); |
| 1085 | |
| 1086 | if (skb1) { |
| 1087 | int ret = 0; |
| 1088 | |
| 1089 | bh_lock_sock_nested(sk); |
| 1090 | if (!sock_owned_by_user(sk)) |
| 1091 | ret = udp_queue_rcv_skb(sk, skb1); |
| 1092 | else |
| 1093 | sk_add_backlog(sk, skb1); |
| 1094 | bh_unlock_sock(sk); |
| 1095 | |
| 1096 | if (ret > 0) |
| 1097 | /* we should probably re-process instead |
| 1098 | * of dropping packets here. */ |
| 1099 | kfree_skb(skb1); |
| 1100 | } |
| 1101 | sk = sknext; |
| 1102 | } while (sknext); |
| 1103 | } else |
| 1104 | kfree_skb(skb); |
| 1105 | read_unlock(&udp_hash_lock); |
| 1106 | return 0; |
| 1107 | } |
| 1108 | |
| 1109 | /* Initialize UDP checksum. If exited with zero value (success),
| 1110 | * CHECKSUM_UNNECESSARY means that no more checks are required.
| 1111 | * Otherwise, csum completion requires checksumming the packet body,
| 1112 | * including the udp header, and folding it into skb->csum.
| 1113 | */ |
| 1114 | static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, |
| 1115 | int proto) |
| 1116 | { |
| 1117 | const struct iphdr *iph; |
| 1118 | int err; |
| 1119 | |
| 1120 | UDP_SKB_CB(skb)->partial_cov = 0; |
| 1121 | UDP_SKB_CB(skb)->cscov = skb->len; |
| 1122 | |
| 1123 | if (proto == IPPROTO_UDPLITE) { |
| 1124 | err = udplite_checksum_init(skb, uh); |
| 1125 | if (err) |
| 1126 | return err; |
| 1127 | } |
| 1128 | |
| 1129 | iph = ip_hdr(skb); |
| 1130 | if (uh->check == 0) { |
| 1131 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
| 1132 | } else if (skb->ip_summed == CHECKSUM_COMPLETE) { |
| 1133 | if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, |
| 1134 | proto, skb->csum)) |
| 1135 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
| 1136 | } |
| 1137 | if (!skb_csum_unnecessary(skb)) |
| 1138 | skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, |
| 1139 | skb->len, proto, 0); |
| 1140 | /* Probably, we should checksum udp header (it should be in cache |
| 1141 | * in any case) and data in tiny packets (< rx copybreak). |
| 1142 | */ |
| 1143 | |
| 1144 | return 0; |
| 1145 | } |
| 1146 | |
| 1147 | /* |
| 1148 | * All we need to do is get the socket, and then do a checksum. |
| 1149 | */ |
| 1150 | |
| 1151 | int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], |
| 1152 | int proto) |
| 1153 | { |
| 1154 | struct sock *sk; |
| 1155 | struct udphdr *uh = udp_hdr(skb); |
| 1156 | unsigned short ulen; |
| 1157 | struct rtable *rt = (struct rtable*)skb->dst; |
| 1158 | __be32 saddr = ip_hdr(skb)->saddr; |
| 1159 | __be32 daddr = ip_hdr(skb)->daddr; |
| 1160 | |
| 1161 | /* |
| 1162 | * Validate the packet. |
| 1163 | */ |
| 1164 | if (!pskb_may_pull(skb, sizeof(struct udphdr))) |
| 1165 | goto drop; /* No space for header. */ |
| 1166 | |
| 1167 | ulen = ntohs(uh->len); |
| 1168 | if (ulen > skb->len) |
| 1169 | goto short_packet; |
| 1170 | |
| 1171 | if (proto == IPPROTO_UDP) { |
| 1172 | /* UDP validates ulen. */ |
| 1173 | if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen)) |
| 1174 | goto short_packet; |
| 1175 | uh = udp_hdr(skb); |
| 1176 | } |
| 1177 | |
| 1178 | if (udp4_csum_init(skb, uh, proto)) |
| 1179 | goto csum_error; |
| 1180 | |
| 1181 | if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) |
| 1182 | return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable); |
| 1183 | |
YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 1184 | sk = __udp4_lib_lookup(dev_net(skb->dev), saddr, uh->source, daddr, |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1185 | uh->dest, inet_iif(skb), udptable); |
| 1186 | |
| 1187 | if (sk != NULL) { |
| 1188 | int ret = 0; |
| 1189 | bh_lock_sock_nested(sk); |
| 1190 | if (!sock_owned_by_user(sk)) |
| 1191 | ret = udp_queue_rcv_skb(sk, skb); |
| 1192 | else |
| 1193 | sk_add_backlog(sk, skb); |
| 1194 | bh_unlock_sock(sk); |
| 1195 | sock_put(sk); |
| 1196 | |
| 1197 | /* a return value > 0 means to resubmit the input, but |
| 1198 | * it wants the return to be -protocol, or 0 |
| 1199 | */ |
| 1200 | if (ret > 0) |
| 1201 | return -ret; |
| 1202 | return 0; |
| 1203 | } |
| 1204 | |
| 1205 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) |
| 1206 | goto drop; |
| 1207 | nf_reset(skb); |
| 1208 | |
| 1209 | /* No socket. Drop packet silently, if checksum is wrong */ |
| 1210 | if (udp_lib_checksum_complete(skb)) |
| 1211 | goto csum_error; |
| 1212 | |
| 1213 | UDP_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); |
| 1214 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); |
| 1215 | |
| 1216 | /* |
| 1217 | * Hmm. We got a UDP packet to a port to which we
| 1218 | * don't wanna listen. Ignore it. |
| 1219 | */ |
| 1220 | kfree_skb(skb); |
| 1221 | return 0; |
| 1222 | |
| 1223 | short_packet: |
YOSHIFUJI Hideaki | a7d632b | 2008-04-14 04:09:00 -0700 | [diff] [blame] | 1224 | LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From " NIPQUAD_FMT ":%u %d/%d to " NIPQUAD_FMT ":%u\n", |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1225 | proto == IPPROTO_UDPLITE ? "-Lite" : "", |
| 1226 | NIPQUAD(saddr), |
| 1227 | ntohs(uh->source), |
| 1228 | ulen, |
| 1229 | skb->len, |
| 1230 | NIPQUAD(daddr), |
| 1231 | ntohs(uh->dest)); |
| 1232 | goto drop; |
| 1233 | |
| 1234 | csum_error: |
| 1235 | /* |
| 1236 | * RFC1122: OK. Discards the bad packet silently (as far as |
| 1237 | * the network is concerned, anyway) as per 4.1.3.4 (MUST). |
| 1238 | */ |
YOSHIFUJI Hideaki | a7d632b | 2008-04-14 04:09:00 -0700 | [diff] [blame] | 1239 | LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From " NIPQUAD_FMT ":%u to " NIPQUAD_FMT ":%u ulen %d\n", |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1240 | proto == IPPROTO_UDPLITE ? "-Lite" : "", |
| 1241 | NIPQUAD(saddr), |
| 1242 | ntohs(uh->source), |
| 1243 | NIPQUAD(daddr), |
| 1244 | ntohs(uh->dest), |
| 1245 | ulen); |
| 1246 | drop: |
| 1247 | UDP_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); |
| 1248 | kfree_skb(skb); |
| 1249 | return 0; |
| 1250 | } |
| 1251 | |
| 1252 | int udp_rcv(struct sk_buff *skb) |
| 1253 | { |
| 1254 | return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP); |
| 1255 | } |
| 1256 | |
| 1257 | int udp_destroy_sock(struct sock *sk) |
| 1258 | { |
| 1259 | lock_sock(sk); |
| 1260 | udp_flush_pending_frames(sk); |
| 1261 | release_sock(sk); |
| 1262 | return 0; |
| 1263 | } |
| 1264 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1265 | /* |
| 1266 | * Socket option code for UDP |
| 1267 | */ |
Gerrit Renker | 4c0a6cb | 2006-11-27 09:29:59 -0800 | [diff] [blame] | 1268 | int udp_lib_setsockopt(struct sock *sk, int level, int optname, |
| 1269 | char __user *optval, int optlen, |
| 1270 | int (*push_pending_frames)(struct sock *)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1271 | { |
| 1272 | struct udp_sock *up = udp_sk(sk); |
| 1273 | int val; |
| 1274 | int err = 0; |
Wang Chen | b2bf1e2 | 2007-12-03 22:34:16 +1100 | [diff] [blame] | 1275 | int is_udplite = IS_UDPLITE(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1276 | |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 1277 | if (optlen<sizeof(int)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1278 | return -EINVAL; |
| 1279 | |
| 1280 | if (get_user(val, (int __user *)optval)) |
| 1281 | return -EFAULT; |
| 1282 | |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 1283 | switch (optname) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1284 | case UDP_CORK: |
| 1285 | if (val != 0) { |
| 1286 | up->corkflag = 1; |
| 1287 | } else { |
| 1288 | up->corkflag = 0; |
| 1289 | lock_sock(sk); |
Gerrit Renker | 4c0a6cb | 2006-11-27 09:29:59 -0800 | [diff] [blame] | 1290 | (*push_pending_frames)(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1291 | release_sock(sk); |
| 1292 | } |
| 1293 | break; |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 1294 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1295 | case UDP_ENCAP: |
| 1296 | switch (val) { |
| 1297 | case 0: |
| 1298 | case UDP_ENCAP_ESPINUDP: |
| 1299 | case UDP_ENCAP_ESPINUDP_NON_IKE: |
James Chapman | 067b207 | 2007-07-05 17:08:05 -0700 | [diff] [blame] | 1300 | up->encap_rcv = xfrm4_udp_encap_rcv; |
| 1301 | /* FALLTHROUGH */ |
James Chapman | 342f023 | 2007-06-27 15:37:46 -0700 | [diff] [blame] | 1302 | case UDP_ENCAP_L2TPINUDP: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1303 | up->encap_type = val; |
| 1304 | break; |
| 1305 | default: |
| 1306 | err = -ENOPROTOOPT; |
| 1307 | break; |
| 1308 | } |
| 1309 | break; |
| 1310 | |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 1311 | /* |
| 1312 | * UDP-Lite's partial checksum coverage (RFC 3828). |
| 1313 | */ |
| 1314 | /* The sender sets actual checksum coverage length via this option. |
| 1315 | * The case coverage > packet length is handled by send module. */ |
| 1316 | case UDPLITE_SEND_CSCOV: |
Wang Chen | b2bf1e2 | 2007-12-03 22:34:16 +1100 | [diff] [blame] | 1317 | if (!is_udplite) /* Disable the option on UDP sockets */ |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 1318 | return -ENOPROTOOPT; |
| 1319 | if (val != 0 && val < 8) /* Illegal coverage: use default (8) */ |
| 1320 | val = 8; |
| 1321 | up->pcslen = val; |
| 1322 | up->pcflag |= UDPLITE_SEND_CC; |
| 1323 | break; |
| 1324 | |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 1325 | /* The receiver specifies a minimum checksum coverage value. To make |
| 1326 | * sense, this should be set to at least 8 (as done below). If zero is |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 1327 | * used, this again means full checksum coverage. */ |
| 1328 | case UDPLITE_RECV_CSCOV: |
Wang Chen | b2bf1e2 | 2007-12-03 22:34:16 +1100 | [diff] [blame] | 1329 | if (!is_udplite) /* Disable the option on UDP sockets */ |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 1330 | return -ENOPROTOOPT; |
| 1331 | if (val != 0 && val < 8) /* Avoid silly minimal values. */ |
| 1332 | val = 8; |
| 1333 | up->pcrlen = val; |
| 1334 | up->pcflag |= UDPLITE_RECV_CC; |
| 1335 | break; |
| 1336 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1337 | default: |
| 1338 | err = -ENOPROTOOPT; |
| 1339 | break; |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 1340 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1341 | |
| 1342 | return err; |
| 1343 | } |
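/*
 * Illustrative userspace sketch (not part of the original file): setting
 * the UDP-Lite checksum coverage handled above.  The socket must be
 * created with IPPROTO_UDPLITE; values between 1 and 7 are rounded up to
 * 8 as in the code.  Assumes the IPPROTO_UDPLITE (136) and
 * UDPLITE_{SEND,RECV}_CSCOV constants are available (e.g. via
 * <linux/udp.h>); error handling omitted.
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int cov = 20;	/* checksum only the first 20 bytes of each datagram */
 *
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
 */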
| 1344 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1345 | int udp_setsockopt(struct sock *sk, int level, int optname, |
| 1346 | char __user *optval, int optlen) |
| 1347 | { |
| 1348 | if (level == SOL_UDP || level == SOL_UDPLITE) |
| 1349 | return udp_lib_setsockopt(sk, level, optname, optval, optlen, |
| 1350 | udp_push_pending_frames); |
| 1351 | return ip_setsockopt(sk, level, optname, optval, optlen); |
| 1352 | } |
| 1353 | |
| 1354 | #ifdef CONFIG_COMPAT |
| 1355 | int compat_udp_setsockopt(struct sock *sk, int level, int optname, |
| 1356 | char __user *optval, int optlen) |
| 1357 | { |
| 1358 | if (level == SOL_UDP || level == SOL_UDPLITE) |
| 1359 | return udp_lib_setsockopt(sk, level, optname, optval, optlen, |
| 1360 | udp_push_pending_frames); |
| 1361 | return compat_ip_setsockopt(sk, level, optname, optval, optlen); |
| 1362 | } |
| 1363 | #endif |
| 1364 | |
Gerrit Renker | 4c0a6cb | 2006-11-27 09:29:59 -0800 | [diff] [blame] | 1365 | int udp_lib_getsockopt(struct sock *sk, int level, int optname, |
| 1366 | char __user *optval, int __user *optlen) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1367 | { |
| 1368 | struct udp_sock *up = udp_sk(sk); |
| 1369 | int val, len; |
| 1370 | |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 1371 | if (get_user(len, optlen)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1372 | return -EFAULT; |
| 1373 | |
| 1374 | len = min_t(unsigned int, len, sizeof(int)); |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 1375 | |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 1376 | if (len < 0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1377 | return -EINVAL; |
| 1378 | |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 1379 | switch (optname) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1380 | case UDP_CORK: |
| 1381 | val = up->corkflag; |
| 1382 | break; |
| 1383 | |
| 1384 | case UDP_ENCAP: |
| 1385 | val = up->encap_type; |
| 1386 | break; |
| 1387 | |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 1388 | /* The following two cannot be changed on UDP sockets; the return is |
| 1389 | * always 0 (which corresponds to the full checksum coverage of UDP). */ |
| 1390 | case UDPLITE_SEND_CSCOV: |
| 1391 | val = up->pcslen; |
| 1392 | break; |
| 1393 | |
| 1394 | case UDPLITE_RECV_CSCOV: |
| 1395 | val = up->pcrlen; |
| 1396 | break; |
| 1397 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1398 | default: |
| 1399 | return -ENOPROTOOPT; |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 1400 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1401 | |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 1402 | if (put_user(len, optlen)) |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 1403 | return -EFAULT; |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 1404 | if (copy_to_user(optval, &val, len)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1405 | return -EFAULT; |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 1406 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1407 | } |
| 1408 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1409 | int udp_getsockopt(struct sock *sk, int level, int optname, |
| 1410 | char __user *optval, int __user *optlen) |
| 1411 | { |
| 1412 | if (level == SOL_UDP || level == SOL_UDPLITE) |
| 1413 | return udp_lib_getsockopt(sk, level, optname, optval, optlen); |
| 1414 | return ip_getsockopt(sk, level, optname, optval, optlen); |
| 1415 | } |
| 1416 | |
| 1417 | #ifdef CONFIG_COMPAT |
| 1418 | int compat_udp_getsockopt(struct sock *sk, int level, int optname, |
| 1419 | char __user *optval, int __user *optlen) |
| 1420 | { |
| 1421 | if (level == SOL_UDP || level == SOL_UDPLITE) |
| 1422 | return udp_lib_getsockopt(sk, level, optname, optval, optlen); |
| 1423 | return compat_ip_getsockopt(sk, level, optname, optval, optlen); |
| 1424 | } |
| 1425 | #endif |
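The getsockopt() side mirrors the same routing: SOL_UDP and SOL_UDPLITE are answered by udp_lib_getsockopt() above, anything else by the IP layer. A small sketch exercising both paths on one socket; the values printed are simply whatever the kernel currently holds, and UDP_CORK is again defined defensively.

/* Illustrative userspace sketch: one query handled at the UDP level,
 * one handed down to the IP level.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef UDP_CORK
#define UDP_CORK	1
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int cork = 0, ttl = 0;
	socklen_t len;

	if (fd < 0)
		return 1;
	len = sizeof(cork);
	getsockopt(fd, IPPROTO_UDP, UDP_CORK, &cork, &len);	/* udp_lib_getsockopt() */
	len = sizeof(ttl);
	getsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, &len);		/* ip_getsockopt() */
	printf("UDP_CORK=%d IP_TTL=%d\n", cork, ttl);
	return 0;
}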
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1426 | /** |
| 1427 | * udp_poll - wait for a UDP event. |
| 1428 | * @file: file struct |
| 1429 | * @sock: socket |
| 1430 | * @wait: poll table |
| 1431 | * |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 1432 | * This is the same as datagram poll, except for the special case of |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1433 | * blocking sockets. If an application is using a blocking fd |
| 1434 | * and a packet with a checksum error is in the queue, |
| 1435 | * it could get a return from select() indicating data is available, |
| 1436 | * but then block when reading it. Add special-case code |
| 1437 | * to work around these arguably broken applications. |
| 1438 | */ |
| 1439 | unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) |
| 1440 | { |
| 1441 | unsigned int mask = datagram_poll(file, sock, wait); |
| 1442 | struct sock *sk = sock->sk; |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 1443 | int is_lite = IS_UDPLITE(sk); |
| 1444 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1445 | /* Check for false positives due to checksum errors */ |
| 1446 | if ((mask & POLLRDNORM) && |
| 1447 | !(file->f_flags & O_NONBLOCK) && |
| 1448 | !(sk->sk_shutdown & RCV_SHUTDOWN)) { |
| 1449 | struct sk_buff_head *rcvq = &sk->sk_receive_queue; |
| 1450 | struct sk_buff *skb; |
| 1451 | |
Herbert Xu | 208d898 | 2005-05-30 15:50:15 -0700 | [diff] [blame] | 1452 | spin_lock_bh(&rcvq->lock); |
Herbert Xu | 759e5d0 | 2007-03-25 20:10:56 -0700 | [diff] [blame] | 1453 | while ((skb = skb_peek(rcvq)) != NULL && |
| 1454 | udp_lib_checksum_complete(skb)) { |
| 1455 | UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite); |
| 1456 | __skb_unlink(skb, rcvq); |
| 1457 | kfree_skb(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1458 | } |
Herbert Xu | 208d898 | 2005-05-30 15:50:15 -0700 | [diff] [blame] | 1459 | spin_unlock_bh(&rcvq->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1460 | |
| 1461 | /* nothing to see, move along */ |
| 1462 | if (skb == NULL) |
| 1463 | mask &= ~(POLLIN | POLLRDNORM); |
| 1464 | } |
| 1465 | |
| 1466 | return mask; |
YOSHIFUJI Hideaki | e905a9e | 2007-02-09 23:24:47 +0900 | [diff] [blame] | 1467 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1468 | } |
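Because of that workaround, the usual poll()-then-read pattern stays safe even on a blocking UDP socket: when POLLIN is reported, any datagrams at the head of the queue that failed checksum verification have already been dropped. A minimal userspace sketch (the port is a placeholder):

/* Illustrative userspace sketch: poll() followed by a blocking recvfrom(). */
#include <stdio.h>
#include <poll.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr = {
		.sin_family      = AF_INET,
		.sin_port        = htons(12345),	/* placeholder port */
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	char buf[2048];

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("socket/bind");
		return 1;
	}
	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN)) {
		/* Datagrams with bad checksums were discarded in udp_poll(). */
		ssize_t n = recvfrom(fd, buf, sizeof(buf), 0, NULL, NULL);
		printf("received %zd bytes\n", n);
	}
	return 0;
}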
| 1469 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1470 | struct proto udp_prot = { |
| 1471 | .name = "UDP", |
| 1472 | .owner = THIS_MODULE, |
| 1473 | .close = udp_lib_close, |
| 1474 | .connect = ip4_datagram_connect, |
| 1475 | .disconnect = udp_disconnect, |
| 1476 | .ioctl = udp_ioctl, |
| 1477 | .destroy = udp_destroy_sock, |
| 1478 | .setsockopt = udp_setsockopt, |
| 1479 | .getsockopt = udp_getsockopt, |
| 1480 | .sendmsg = udp_sendmsg, |
| 1481 | .recvmsg = udp_recvmsg, |
| 1482 | .sendpage = udp_sendpage, |
| 1483 | .backlog_rcv = udp_queue_rcv_skb, |
| 1484 | .hash = udp_lib_hash, |
| 1485 | .unhash = udp_lib_unhash, |
| 1486 | .get_port = udp_v4_get_port, |
| 1487 | .memory_allocated = &udp_memory_allocated, |
| 1488 | .sysctl_mem = sysctl_udp_mem, |
| 1489 | .sysctl_wmem = &sysctl_udp_wmem_min, |
| 1490 | .sysctl_rmem = &sysctl_udp_rmem_min, |
| 1491 | .obj_size = sizeof(struct udp_sock), |
Pavel Emelyanov | 6ba5a3c | 2008-03-22 16:51:21 -0700 | [diff] [blame] | 1492 | .h.udp_hash = udp_hash, |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1493 | #ifdef CONFIG_COMPAT |
| 1494 | .compat_setsockopt = compat_udp_setsockopt, |
| 1495 | .compat_getsockopt = compat_udp_getsockopt, |
| 1496 | #endif |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1497 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1498 | |
| 1499 | /* ------------------------------------------------------------------------ */ |
| 1500 | #ifdef CONFIG_PROC_FS |
| 1501 | |
| 1502 | static struct sock *udp_get_first(struct seq_file *seq) |
| 1503 | { |
| 1504 | struct sock *sk; |
| 1505 | struct udp_iter_state *state = seq->private; |
Denis V. Lunev | 6f191ef | 2008-03-28 18:23:33 -0700 | [diff] [blame] | 1506 | struct net *net = seq_file_net(seq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1507 | |
| 1508 | for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) { |
| 1509 | struct hlist_node *node; |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 1510 | sk_for_each(sk, node, state->hashtable + state->bucket) { |
YOSHIFUJI Hideaki | 878628f | 2008-03-26 03:57:35 +0900 | [diff] [blame] | 1511 | if (!net_eq(sock_net(sk), net)) |
Daniel Lezcano | a91275e | 2008-03-21 04:11:58 -0700 | [diff] [blame] | 1512 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1513 | if (sk->sk_family == state->family) |
| 1514 | goto found; |
| 1515 | } |
| 1516 | } |
| 1517 | sk = NULL; |
| 1518 | found: |
| 1519 | return sk; |
| 1520 | } |
| 1521 | |
| 1522 | static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) |
| 1523 | { |
| 1524 | struct udp_iter_state *state = seq->private; |
Denis V. Lunev | 6f191ef | 2008-03-28 18:23:33 -0700 | [diff] [blame] | 1525 | struct net *net = seq_file_net(seq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1526 | |
| 1527 | do { |
| 1528 | sk = sk_next(sk); |
| 1529 | try_again: |
| 1530 | ; |
YOSHIFUJI Hideaki | 878628f | 2008-03-26 03:57:35 +0900 | [diff] [blame] | 1531 | } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1532 | |
| 1533 | if (!sk && ++state->bucket < UDP_HTABLE_SIZE) { |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 1534 | sk = sk_head(state->hashtable + state->bucket); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1535 | goto try_again; |
| 1536 | } |
| 1537 | return sk; |
| 1538 | } |
| 1539 | |
| 1540 | static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) |
| 1541 | { |
| 1542 | struct sock *sk = udp_get_first(seq); |
| 1543 | |
| 1544 | if (sk) |
Stephen Hemminger | 6516c65 | 2007-03-08 20:41:55 -0800 | [diff] [blame] | 1545 | while (pos && (sk = udp_get_next(seq, sk)) != NULL) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1546 | --pos; |
| 1547 | return pos ? NULL : sk; |
| 1548 | } |
| 1549 | |
| 1550 | static void *udp_seq_start(struct seq_file *seq, loff_t *pos) |
Eric Dumazet | 9a429c4 | 2008-01-01 21:58:02 -0800 | [diff] [blame] | 1551 | __acquires(udp_hash_lock) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1552 | { |
| 1553 | read_lock(&udp_hash_lock); |
YOSHIFUJI Hideaki | b50660f | 2008-03-31 19:38:15 -0700 | [diff] [blame] | 1554 | return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1555 | } |
| 1556 | |
| 1557 | static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
| 1558 | { |
| 1559 | struct sock *sk; |
| 1560 | |
YOSHIFUJI Hideaki | b50660f | 2008-03-31 19:38:15 -0700 | [diff] [blame] | 1561 | if (v == SEQ_START_TOKEN) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1562 | sk = udp_get_idx(seq, 0); |
| 1563 | else |
| 1564 | sk = udp_get_next(seq, v); |
| 1565 | |
| 1566 | ++*pos; |
| 1567 | return sk; |
| 1568 | } |
| 1569 | |
| 1570 | static void udp_seq_stop(struct seq_file *seq, void *v) |
Eric Dumazet | 9a429c4 | 2008-01-01 21:58:02 -0800 | [diff] [blame] | 1571 | __releases(udp_hash_lock) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1572 | { |
| 1573 | read_unlock(&udp_hash_lock); |
| 1574 | } |
| 1575 | |
| 1576 | static int udp_seq_open(struct inode *inode, struct file *file) |
| 1577 | { |
| 1578 | struct udp_seq_afinfo *afinfo = PDE(inode)->data; |
Denis V. Lunev | a2be75c | 2008-03-28 18:25:06 -0700 | [diff] [blame] | 1579 | struct udp_iter_state *s; |
| 1580 | int err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1581 | |
Denis V. Lunev | a2be75c | 2008-03-28 18:25:06 -0700 | [diff] [blame] | 1582 | err = seq_open_net(inode, file, &afinfo->seq_ops, |
| 1583 | sizeof(struct udp_iter_state)); |
| 1584 | if (err < 0) |
| 1585 | return err; |
Daniel Lezcano | a91275e | 2008-03-21 04:11:58 -0700 | [diff] [blame] | 1586 | |
Denis V. Lunev | a2be75c | 2008-03-28 18:25:06 -0700 | [diff] [blame] | 1587 | s = ((struct seq_file *)file->private_data)->private; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1588 | s->family = afinfo->family; |
Gerrit Renker | ba4e58e | 2006-11-27 11:10:57 -0800 | [diff] [blame] | 1589 | s->hashtable = afinfo->hashtable; |
Denis V. Lunev | a2be75c | 2008-03-28 18:25:06 -0700 | [diff] [blame] | 1590 | return err; |
Daniel Lezcano | a91275e | 2008-03-21 04:11:58 -0700 | [diff] [blame] | 1591 | } |
| 1592 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1593 | /* ------------------------------------------------------------------------ */ |
Daniel Lezcano | 0c96d8c | 2008-03-21 04:14:17 -0700 | [diff] [blame] | 1594 | int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1595 | { |
| 1596 | struct proc_dir_entry *p; |
| 1597 | int rc = 0; |
| 1598 | |
Denis V. Lunev | 3ba9441 | 2008-03-28 18:25:32 -0700 | [diff] [blame] | 1599 | afinfo->seq_fops.open = udp_seq_open; |
| 1600 | afinfo->seq_fops.read = seq_read; |
| 1601 | afinfo->seq_fops.llseek = seq_lseek; |
| 1602 | afinfo->seq_fops.release = seq_release_net; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1603 | |
Denis V. Lunev | dda6192 | 2008-03-28 18:24:26 -0700 | [diff] [blame] | 1604 | afinfo->seq_ops.start = udp_seq_start; |
| 1605 | afinfo->seq_ops.next = udp_seq_next; |
| 1606 | afinfo->seq_ops.stop = udp_seq_stop; |
| 1607 | |
Denis V. Lunev | 3ba9441 | 2008-03-28 18:25:32 -0700 | [diff] [blame] | 1608 | p = proc_net_fops_create(net, afinfo->name, S_IRUGO, &afinfo->seq_fops); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1609 | if (p) |
| 1610 | p->data = afinfo; |
| 1611 | else |
| 1612 | rc = -ENOMEM; |
| 1613 | return rc; |
| 1614 | } |
| 1615 | |
Daniel Lezcano | 0c96d8c | 2008-03-21 04:14:17 -0700 | [diff] [blame] | 1616 | void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1617 | { |
Daniel Lezcano | 0c96d8c | 2008-03-21 04:14:17 -0700 | [diff] [blame] | 1618 | proc_net_remove(net, afinfo->name); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1619 | } |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1620 | |
| 1621 | /* ------------------------------------------------------------------------ */ |
| 1622 | static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket) |
| 1623 | { |
| 1624 | struct inet_sock *inet = inet_sk(sp); |
| 1625 | __be32 dest = inet->daddr; |
| 1626 | __be32 src = inet->rcv_saddr; |
| 1627 | __u16 destp = ntohs(inet->dport); |
| 1628 | __u16 srcp = ntohs(inet->sport); |
| 1629 | |
| 1630 | sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X" |
| 1631 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p", |
| 1632 | bucket, src, srcp, dest, destp, sp->sk_state, |
| 1633 | atomic_read(&sp->sk_wmem_alloc), |
| 1634 | atomic_read(&sp->sk_rmem_alloc), |
| 1635 | 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), |
| 1636 | atomic_read(&sp->sk_refcnt), sp); |
| 1637 | } |
| 1638 | |
| 1639 | int udp4_seq_show(struct seq_file *seq, void *v) |
| 1640 | { |
| 1641 | if (v == SEQ_START_TOKEN) |
| 1642 | seq_printf(seq, "%-127s\n", |
| 1643 | " sl local_address rem_address st tx_queue " |
| 1644 | "rx_queue tr tm->when retrnsmt uid timeout " |
| 1645 | "inode"); |
| 1646 | else { |
| 1647 | char tmpbuf[129]; |
| 1648 | struct udp_iter_state *state = seq->private; |
| 1649 | |
| 1650 | udp4_format_sock(v, tmpbuf, state->bucket); |
| 1651 | seq_printf(seq, "%-127s\n", tmpbuf); |
| 1652 | } |
| 1653 | return 0; |
| 1654 | } |
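The header and per-socket lines emitted here are what appears in /proc/net/udp (and, through the same afinfo machinery, /proc/net/udplite). A small userspace sketch that parses back the columns udp4_format_sock() fills with real data, skipping the ones it hard-codes to zero:

/* Illustrative sketch: read the bucket, local address/port and inode
 * columns of /proc/net/udp as formatted by udp4_format_sock().
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/net/udp", "r");
	char line[256];
	unsigned int bucket, src, dst, state, srcp, dstp;
	unsigned long inode;

	if (!f)
		return 1;
	fgets(line, sizeof(line), f);	/* skip the header line */
	while (fgets(line, sizeof(line), f)) {
		/* " sl local_address rem_address st ... inode" */
		if (sscanf(line, "%u: %x:%x %x:%x %x %*x:%*x %*x:%*lx %*x %*d %*d %lu",
			   &bucket, &src, &srcp, &dst, &dstp, &state, &inode) == 7)
			printf("bucket %u local 0x%08x:%u inode %lu\n",
			       bucket, src, srcp, inode);
	}
	fclose(f);
	return 0;
}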
| 1655 | |
| 1656 | /* ------------------------------------------------------------------------ */ |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1657 | static struct udp_seq_afinfo udp4_seq_afinfo = { |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1658 | .name = "udp", |
| 1659 | .family = AF_INET, |
| 1660 | .hashtable = udp_hash, |
Denis V. Lunev | 4ad96d3 | 2008-03-28 18:25:53 -0700 | [diff] [blame] | 1661 | .seq_fops = { |
| 1662 | .owner = THIS_MODULE, |
| 1663 | }, |
Denis V. Lunev | dda6192 | 2008-03-28 18:24:26 -0700 | [diff] [blame] | 1664 | .seq_ops = { |
| 1665 | .show = udp4_seq_show, |
| 1666 | }, |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1667 | }; |
| 1668 | |
Pavel Emelyanov | 15439fe | 2008-03-24 14:53:49 -0700 | [diff] [blame] | 1669 | static int udp4_proc_init_net(struct net *net) |
| 1670 | { |
| 1671 | return udp_proc_register(net, &udp4_seq_afinfo); |
| 1672 | } |
| 1673 | |
| 1674 | static void udp4_proc_exit_net(struct net *net) |
| 1675 | { |
| 1676 | udp_proc_unregister(net, &udp4_seq_afinfo); |
| 1677 | } |
| 1678 | |
| 1679 | static struct pernet_operations udp4_net_ops = { |
| 1680 | .init = udp4_proc_init_net, |
| 1681 | .exit = udp4_proc_exit_net, |
| 1682 | }; |
| 1683 | |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1684 | int __init udp4_proc_init(void) |
| 1685 | { |
Pavel Emelyanov | 15439fe | 2008-03-24 14:53:49 -0700 | [diff] [blame] | 1686 | return register_pernet_subsys(&udp4_net_ops); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1687 | } |
| 1688 | |
| 1689 | void udp4_proc_exit(void) |
| 1690 | { |
Pavel Emelyanov | 15439fe | 2008-03-24 14:53:49 -0700 | [diff] [blame] | 1691 | unregister_pernet_subsys(&udp4_net_ops); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1692 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1693 | #endif /* CONFIG_PROC_FS */ |
| 1694 | |
Hideo Aoki | 95766ff | 2007-12-31 00:29:24 -0800 | [diff] [blame] | 1695 | void __init udp_init(void) |
| 1696 | { |
| 1697 | unsigned long limit; |
| 1698 | |
| 1699 | /* Set up the pressure threshold using the same strategy as TCP. It is a |
| 1700 | * fraction of global memory that is up to 1/2 at 256 MB, decreasing |
| 1701 | * toward zero with the amount of memory, with a floor of 128 pages. |
| 1702 | */ |
| 1703 | limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); |
| 1704 | limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); |
| 1705 | limit = max(limit, 128UL); |
| 1706 | sysctl_udp_mem[0] = limit / 4 * 3; |
| 1707 | sysctl_udp_mem[1] = limit; |
| 1708 | sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2; |
| 1709 | |
| 1710 | sysctl_udp_rmem_min = SK_MEM_QUANTUM; |
| 1711 | sysctl_udp_wmem_min = SK_MEM_QUANTUM; |
| 1712 | } |
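For a concrete feel of the resulting limits, here is a worked example under stated assumptions: 4 KiB pages (PAGE_SHIFT == 12) and nr_all_pages == 131072, i.e. 512 MB of RAM; all results are in pages.

/*
 * limit = min(131072, 1 << 16) >> 8              =   256
 * limit = (256 * (131072 >> 8)) >> 1             = 65536   (256 MB)
 * limit = max(65536, 128)                        = 65536
 *
 * sysctl_udp_mem[0] = 65536 / 4 * 3              = 49152
 * sysctl_udp_mem[1] = 65536
 * sysctl_udp_mem[2] = 49152 * 2                  = 98304
 */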
| 1713 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1714 | EXPORT_SYMBOL(udp_disconnect); |
| 1715 | EXPORT_SYMBOL(udp_hash); |
| 1716 | EXPORT_SYMBOL(udp_hash_lock); |
| 1717 | EXPORT_SYMBOL(udp_ioctl); |
David S. Miller | db8dac2 | 2008-03-06 16:22:02 -0800 | [diff] [blame] | 1718 | EXPORT_SYMBOL(udp_prot); |
| 1719 | EXPORT_SYMBOL(udp_sendmsg); |
Gerrit Renker | 4c0a6cb | 2006-11-27 09:29:59 -0800 | [diff] [blame] | 1720 | EXPORT_SYMBOL(udp_lib_getsockopt); |
| 1721 | EXPORT_SYMBOL(udp_lib_setsockopt); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1722 | EXPORT_SYMBOL(udp_poll); |
Pavel Emelyanov | 6ba5a3c | 2008-03-22 16:51:21 -0700 | [diff] [blame] | 1723 | EXPORT_SYMBOL(udp_lib_get_port); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1724 | |
| 1725 | #ifdef CONFIG_PROC_FS |
| 1726 | EXPORT_SYMBOL(udp_proc_register); |
| 1727 | EXPORT_SYMBOL(udp_proc_unregister); |
| 1728 | #endif |