/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
EXPORT_SYMBOL(sysctl_ip_default_ttl);

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

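/*
 * Every rewrite of the IP header must be followed by a fresh
 * ip_send_check(): __ip_local_out() below does so after setting
 * tot_len, and ip_fragment() does so after rewriting frag_off.
 */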
int __ip_local_out(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}

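/*
 * A return value of 1 from nf_hook() means the LOCAL_OUT hooks accepted
 * the packet and left it to the caller to pass the skb to dst_output(),
 * which ip_local_out() does below.
 */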
int ip_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(newskb));
	skb_dst_force(newskb);
	netif_rx_ni(newskb);
	return 0;
}

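/*
 * inet->uc_ttl is -1 unless the application set IP_TTL explicitly, so
 * unicast packets normally fall back to the route's hop limit.
 */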
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 */
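/*
 * Typically used for replies generated before a full socket exists,
 * e.g. the TCP SYN-ACK sent on behalf of a request socket.
 */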
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr    = saddr;
	iph->protocol = sk->sk_protocol;
	ip_select_ident(iph, &rt->dst, sk);

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen >> 2;
		ip_options_build(skb, &opt->opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	rcu_read_lock();
	neigh = dst_get_neighbour(dst);
	if (neigh) {
		int res = neigh_output(neigh, skb);

		rcu_read_unlock();
		return res;
	}
	rcu_read_unlock();

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}

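/*
 * With IP_PMTUDISC_PROBE the socket deliberately ignores the cached
 * path MTU and uses the device MTU instead, so that oversized probe
 * packets can be sent to rediscover the real path MTU.
 */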
static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;

	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}

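/*
 * GSO packets skip ip_fragment() below: they are segmented later, on
 * the way to the device, and each resulting segment already fits the
 * MTU.
 */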
static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}

int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that were returned after forwarding; ip_mr_input will
		   drop them anyway.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
				NULL, newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
			    skb->dev, ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

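/*
 * For IPv4 the routing code normally installs ip_output() (or
 * ip_mc_output() above) as dst->output, so dst_output() on a routed
 * skb ends up here.
 */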
int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

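/*
 * ip_queue_xmit() is the transmit entry point for connection-oriented
 * transports such as TCP: the socket usually holds a cached route, and
 * the IP header is built around the transport payload already in skb.
 */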
int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, the retransmit mechanism of the transport
		 * layer will keep trying until the route appears or the
		 * connection times itself out.
		 */
		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS(sk),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = fl4->saddr;
	iph->daddr    = fl4->daddr;
	/* Transport layer set skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);


static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each of size equal to the IP header plus
 *	a block of the data of the original IP data part) that will yet fit
 *	in a single device frame, and queue such a frame for sending.
 */
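/*
 * When the transport already built a suitable frag_list and its
 * geometry matches the MTU, the fast path below reuses those buffers
 * instead of copying; otherwise the code falls back to the slow path.
 */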

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct iphdr *iph;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = skb_rtable(skb);
	int err = 0;

	dev = rt->dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(ip_skb_dst_mtu(skb)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->dst) - hlen;	/* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge)
		mtu -= nf_bridge_mtu_reduction(skb);
#endif
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it.  First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; this is not prohibited.  In that case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when we see the first bad fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK.  Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare the header of the next frame,
			 * before the previous one goes down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

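/*
 * Slow path: the original skb cannot be reused, so allocate a fresh skb
 * for every fragment and copy the payload into it piece by piece.
 */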
slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/* for bridged IP traffic encapsulated inside f.e. a vlan header,
	 * we need to make room for the encapsulating header
	 */
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC: if we are fragmenting a fragment that's not
		 *	the last fragment then keep the MF bit set on each
		 *	fragment.
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_fragment);

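/*
 * ip_generic_getfrag() is the getfrag callback used by the append-data
 * path below: it copies a chunk of the user iovec into the skb and,
 * when the device cannot checksum, folds a running checksum as it
 * copies.
 */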
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

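/*
 * UDP fragmentation offload: queue one oversized datagram and let the
 * device (or the software GSO layer) slice it into MTU-sized fragments
 * on transmit; gso_size tells it where to cut.
 */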
static inline int ip_ufo_append_data(struct sock *sk,
			struct sk_buff_head *queue,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int maxfraglen, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(queue, skb);
	}

	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}

static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->fragsize;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

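	/*
	 * fragheaderlen is the size of the per-fragment IP header
	 * (options included); maxfraglen rounds the payload down to a
	 * multiple of 8 bytes, as the fragment offset field requires.
	 */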
	if (cork->length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & NETIF_F_V4_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	cork->length += length;
	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
					 hh_len, fragheaderlen, transhdrlen,
					 maxfraglen, flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chained skb;
	 * each segment is an IP fragment ready for sending to the network
	 * after adding the appropriate IP header.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = fraglen;

			alloclen += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else
					/* only the initial fragment is
					   time stamped */
					cork->tx_flags = 0;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);
			skb_shinfo(skb)->tx_flags = cork->tx_flags;

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen + exthdrlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = cork->page;
			int off = cork->off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				cork->page = page;
				cork->off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			cork->off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

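/*
 * The cork keeps per-datagram state (options, route, fragment size)
 * alive between ip_append_data() calls; the extra 40 bytes below cover
 * the maximum possible size of IP options.
 */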
static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_rcu *opt;
	struct rtable *rt;

	/*
	 * setup for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (cork->opt == NULL) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(cork->opt == NULL))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}
	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;
	/*
	 * We steal a reference to this route; the caller should not
	 * release it.
	 */
	*rtp = NULL;
	cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
			 rt->dst.dev->mtu : dst_mtu(&rt->dst);
	cork->dst = &rt->dst;
	cork->length = 0;
	cork->tx_flags = ipc->tx_flags;
	cork->page = NULL;
	cork->off = 0;

	return 0;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called.  Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP: other transport protocols, e.g. raw sockets, can
 *	potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int err;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
		if (err)
			return err;
	} else {
		transhdrlen = 0;
	}

	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, getfrag,
				from, length, transhdrlen, flags);
}

ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	struct inet_cork *cork;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	cork = &inet->cork.base;
	rt = (struct rtable *)cork->dst;
	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (!(rt->dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = cork->fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (cork->length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	cork->length += size;
	if ((size + skb->len > mtu) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {

			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		atomic_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	cork->length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static void ip_cork_release(struct inet_cork *cork)
{
	cork->flags &= ~IPCORK_OPT;
	kfree(cork->opt);
	cork->opt = NULL;
	dst_release(cork->dst);
	cork->dst = NULL;
}

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
 */
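/*
 * The queued skbs are chained onto the first skb's frag_list, so the
 * datagram leaves here as a single skb that ip_fragment() can later
 * split along the original boundaries if needed.
 */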
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)cork->dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;

	if ((skb = __skb_dequeue(queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * the frame generated here to be fragmented. No matter how the
	 * transforms change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc < IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow this frame to be
	 * fragmented locally. */
	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	iph->frag_off = df;
	ip_select_ident(iph, &rt->dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = fl4->saddr;
	iph->daddr = fl4->daddr;

	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, cork->addr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	ip_cork_release(cork);
out:
	return skb;
}

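/*
 * Hand one finished datagram to the IP output path.  Positive
 * NET_XMIT congestion codes from ip_local_out() are mapped through
 * net_xmit_errno(); whatever still counts as an error bumps the
 * OutDiscards counter.
 */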
int ip_send_skb(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);
	int err;

	err = ip_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}

int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
	struct sk_buff *skb;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		return 0;

	/* Netfilter gets the whole, not yet fragmented skb. */
	return ip_send_skb(skb);
}

/*
 *	Throw away all pending data on the socket.
 */
static void __ip_flush_pending_frames(struct sock *sk,
				      struct sk_buff_head *queue,
				      struct inet_cork *cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(cork);
}

void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

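/*
 * Single-shot counterpart of ip_append_data() + ip_push_pending_frames():
 * the datagram is assembled on a private queue with an on-stack cork,
 * so the socket's own write queue and cork state are left untouched.
 */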
struct sk_buff *ip_make_skb(struct sock *sk,
			    struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    unsigned int flags)
{
	struct inet_cork cork;
	struct sk_buff_head queue;
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork.flags = 0;
	cork.addr = 0;
	cork.opt = NULL;
	err = ip_setup_cork(sk, &cork, ipc, rtp);
	if (err)
		return ERR_PTR(err);

	err = __ip_append_data(sk, fl4, &queue, &cork, getfrag,
			       from, length, transhdrlen, flags);
	if (err) {
		__ip_flush_pending_frames(sk, &queue, &cork);
		return ERR_PTR(err);
	}

	return __ip_make_skb(sk, fl4, &queue, &cork);
}
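
/*
 * Illustrative use only (caller names are assumed, not from this file):
 * with a route already resolved into rt/fl4, a datagram sender can
 * build and transmit in one shot, without corking the socket:
 *
 *	skb = ip_make_skb(sk, &fl4, getfrag, msg, len, 0,
 *			  &ipc, &rt, MSG_DONTWAIT);
 *	if (!IS_ERR_OR_NULL(skb))
 *		err = ip_send_skb(skb);
 *
 * Failures come back ERR_PTR-encoded, and MSG_PROBE yields NULL,
 * hence the IS_ERR_OR_NULL() check.
 */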

/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far. ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
		   struct ip_reply_arg *arg, unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_data replyopts;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	struct rtable *rt = skb_rtable(skb);

	if (ip_options_echo(&replyopts.opt.opt, skb))
		return;

	ipc.addr = daddr;
	ipc.opt = NULL;
	ipc.tx_flags = 0;

	if (replyopts.opt.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (replyopts.opt.opt.srr)
			daddr = replyopts.opt.opt.faddr;
	}

	flowi4_init_output(&fl4, arg->bound_dev_if, 0,
			   RT_TOS(ip_hdr(skb)->tos),
			   RT_SCOPE_UNIVERSE, sk->sk_protocol,
			   ip_reply_arg_flowi_flags(arg),
			   daddr, rt->rt_spec_dst,
			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(sock_net(sk), &fl4);
	if (IS_ERR(rt))
		return;

	/* And let IP do all the hard work.

	   This chunk is not reentrant, hence the spinlock.
	   Note that it relies on the fact that this function is called
	   with BHs disabled locally and that sk cannot already be
	   spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = ip_hdr(skb)->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, &rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(skb) +
			  arg->csumoffset) = csum_fold(csum_add(skb->csum,
								arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, &fl4);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}
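
/*
 * Caller sketch (identifiers below are placeholders, not code from
 * this file).  The TCP reset path prepares an ip_reply_arg with the
 * raw segment and its checksum coverage, then answers on a
 * per-namespace control socket, roughly:
 *
 *	arg.iov[0].iov_base = &rep;
 *	arg.iov[0].iov_len  = sizeof(rep);
 *	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 *	ip_send_reply(ctl_sk, skb, ip_hdr(skb)->saddr, &arg,
 *		      arg.iov[0].iov_len);
 */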
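/*
 * Boot-time initialisation of the IPv4 output side: the routing
 * subsystem, the inetpeer store and, when multicast plus procfs are
 * configured, the IGMP /proc entries.
 */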
void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}