/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case if packet not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	: 	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readibility.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

44
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>
Brian Haleyab32ea52006-09-22 14:15:41 -070084int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
David S. Miller323e1262010-12-12 21:55:08 -080085EXPORT_SYMBOL(sysctl_ip_default_ttl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070086
87/* Generate a checksum for an outgoing IP datagram. */
88__inline__ void ip_send_check(struct iphdr *iph)
89{
90 iph->check = 0;
91 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
92}
Eric Dumazet4bc2f182010-07-09 21:22:10 +000093EXPORT_SYMBOL(ip_send_check);
Linus Torvalds1da177e2005-04-16 15:20:36 -070094
Herbert Xuc439cb22008-01-11 19:14:00 -080095int __ip_local_out(struct sk_buff *skb)
96{
97 struct iphdr *iph = ip_hdr(skb);
98
99 iph->tot_len = htons(skb->len);
100 ip_send_check(iph);
Jan Engelhardt9bbc7682010-03-23 04:07:29 +0100101 return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
102 skb_dst(skb)->dev, dst_output);
Herbert Xuc439cb22008-01-11 19:14:00 -0800103}
104
/*
 * Send a locally generated packet: run LOCAL_OUT via __ip_local_out()
 * and, when the hook returns 1 (packet accepted, not stolen/queued),
 * continue with dst_output().
 */
int ip_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);
116
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117/* dev_loopback_xmit for use with netfilter. */
118static int ip_dev_loopback_xmit(struct sk_buff *newskb)
119{
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -0700120 skb_reset_mac_header(newskb);
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -0300121 __skb_pull(newskb, skb_network_offset(newskb));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122 newskb->pkt_type = PACKET_LOOPBACK;
123 newskb->ip_summed = CHECKSUM_UNNECESSARY;
Eric Dumazetadf30902009-06-02 05:19:30 +0000124 WARN_ON(!skb_dst(newskb));
Eric Dumazete30b38c2010-04-15 09:13:03 +0000125 netif_rx_ni(newskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126 return 0;
127}
128
129static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
130{
131 int ttl = inet->uc_ttl;
132
133 if (ttl < 0)
David S. Miller323e1262010-12-12 21:55:08 -0800134 ttl = ip4_dst_hoplimit(dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135 return ttl;
136}
137
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900138/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139 * Add an ip header to a skbuff and send it out.
140 *
141 */
142int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
Eric Dumazetf6d8bd02011-04-21 09:45:37 +0000143 __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144{
145 struct inet_sock *inet = inet_sk(sk);
Eric Dumazet511c3f92009-06-02 05:14:27 +0000146 struct rtable *rt = skb_rtable(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147 struct iphdr *iph;
148
149 /* Build the IP header. */
Eric Dumazetf6d8bd02011-04-21 09:45:37 +0000150 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
Arnaldo Carvalho de Melo8856dfa2007-03-10 19:40:39 -0300151 skb_reset_network_header(skb);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -0700152 iph = ip_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153 iph->version = 4;
154 iph->ihl = 5;
155 iph->tos = inet->tos;
Changli Gaod8d1f302010-06-10 23:31:35 -0700156 if (ip_dont_fragment(sk, &rt->dst))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700157 iph->frag_off = htons(IP_DF);
158 else
159 iph->frag_off = 0;
Changli Gaod8d1f302010-06-10 23:31:35 -0700160 iph->ttl = ip_select_ttl(inet, &rt->dst);
David S. Millerdd927a22011-05-04 12:03:30 -0700161 iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
162 iph->saddr = saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163 iph->protocol = sk->sk_protocol;
Changli Gaod8d1f302010-06-10 23:31:35 -0700164 ip_select_ident(iph, &rt->dst, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700165
Eric Dumazetf6d8bd02011-04-21 09:45:37 +0000166 if (opt && opt->opt.optlen) {
167 iph->ihl += opt->opt.optlen>>2;
168 ip_options_build(skb, &opt->opt, daddr, rt, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700169 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700170
171 skb->priority = sk->sk_priority;
Laszlo Attila Toth4a19ec52008-01-30 19:08:16 -0800172 skb->mark = sk->sk_mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173
174 /* Send it out. */
Herbert Xuc439cb22008-01-11 19:14:00 -0800175 return ip_local_out(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176}
Arnaldo Carvalho de Melod8c97a92005-08-09 20:12:12 -0700177EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
178
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179static inline int ip_finish_output2(struct sk_buff *skb)
180{
Eric Dumazetadf30902009-06-02 05:19:30 +0000181 struct dst_entry *dst = skb_dst(skb);
Mitsuru Chinen80787eb2007-04-30 00:48:20 -0700182 struct rtable *rt = (struct rtable *)dst;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183 struct net_device *dev = dst->dev;
Chuck Leverc2636b42007-10-23 21:07:32 -0700184 unsigned int hh_len = LL_RESERVED_SPACE(dev);
David S. Millerf6b72b62011-07-14 07:53:20 -0700185 struct neighbour *neigh;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186
Neil Hormanedf391f2009-04-27 02:45:02 -0700187 if (rt->rt_type == RTN_MULTICAST) {
188 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
189 } else if (rt->rt_type == RTN_BROADCAST)
190 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);
Mitsuru Chinen80787eb2007-04-30 00:48:20 -0700191
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192 /* Be paranoid, rather than too clever. */
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -0700193 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700194 struct sk_buff *skb2;
195
196 skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
197 if (skb2 == NULL) {
198 kfree_skb(skb);
199 return -ENOMEM;
200 }
201 if (skb->sk)
202 skb_set_owner_w(skb2, skb->sk);
203 kfree_skb(skb);
204 skb = skb2;
205 }
206
Eric Dumazetf2c31e32011-07-29 19:00:53 +0000207 rcu_read_lock();
David S. Miller69cce1d2011-07-17 23:09:49 -0700208 neigh = dst_get_neighbour(dst);
Eric Dumazetf2c31e32011-07-29 19:00:53 +0000209 if (neigh) {
210 int res = neigh_output(neigh, skb);
211
212 rcu_read_unlock();
213 return res;
214 }
215 rcu_read_unlock();
David S. Miller05e3aa02011-07-16 17:26:00 -0700216
Linus Torvalds1da177e2005-04-16 15:20:36 -0700217 if (net_ratelimit())
218 printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
219 kfree_skb(skb);
220 return -EINVAL;
221}
222
John Heffner628a5c52007-04-20 15:53:27 -0700223static inline int ip_skb_dst_mtu(struct sk_buff *skb)
224{
225 struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
226
227 return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
Eric Dumazetadf30902009-06-02 05:19:30 +0000228 skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
John Heffner628a5c52007-04-20 15:53:27 -0700229}
230
Patrick McHardy861d0482007-10-15 01:48:39 -0700231static int ip_finish_output(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700232{
Patrick McHardy5c901da2006-01-06 23:05:36 -0800233#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
234 /* Policy lookup after SNAT yielded a new policy */
Eric Dumazetadf30902009-06-02 05:19:30 +0000235 if (skb_dst(skb)->xfrm != NULL) {
Patrick McHardy48d5cad2006-02-15 15:10:22 -0800236 IPCB(skb)->flags |= IPSKB_REROUTED;
237 return dst_output(skb);
238 }
Patrick McHardy5c901da2006-01-06 23:05:36 -0800239#endif
John Heffner628a5c52007-04-20 15:53:27 -0700240 if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
Patrick McHardy1bd9bef2006-01-05 12:20:59 -0800241 return ip_fragment(skb, ip_finish_output2);
242 else
243 return ip_finish_output2(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244}
245
246int ip_mc_output(struct sk_buff *skb)
247{
248 struct sock *sk = skb->sk;
Eric Dumazet511c3f92009-06-02 05:14:27 +0000249 struct rtable *rt = skb_rtable(skb);
Changli Gaod8d1f302010-06-10 23:31:35 -0700250 struct net_device *dev = rt->dst.dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700251
252 /*
253 * If the indicated interface is up and running, send the packet.
254 */
Neil Hormanedf391f2009-04-27 02:45:02 -0700255 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700256
257 skb->dev = dev;
258 skb->protocol = htons(ETH_P_IP);
259
260 /*
261 * Multicasts are looped back for other local users
262 */
263
264 if (rt->rt_flags&RTCF_MULTICAST) {
Octavian Purdila7ad68482010-01-06 20:37:01 -0800265 if (sk_mc_loop(sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266#ifdef CONFIG_IP_MROUTE
267 /* Small optimization: do not loopback not local frames,
268 which returned after forwarding; they will be dropped
269 by ip_mr_input in any case.
270 Note, that local frames are looped back to be delivered
271 to local recipients.
272
273 This check is duplicated in ip_mr_input at the moment.
274 */
Joe Perches9d4fb272009-11-23 10:41:23 -0800275 &&
276 ((rt->rt_flags & RTCF_LOCAL) ||
277 !(IPCB(skb)->flags & IPSKB_FORWARDED))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278#endif
Joe Perches9d4fb272009-11-23 10:41:23 -0800279 ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
281 if (newskb)
Jan Engelhardt9bbc7682010-03-23 04:07:29 +0100282 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
283 newskb, NULL, newskb->dev,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700284 ip_dev_loopback_xmit);
285 }
286
287 /* Multicasts with ttl 0 must not go beyond the host */
288
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -0700289 if (ip_hdr(skb)->ttl == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290 kfree_skb(skb);
291 return 0;
292 }
293 }
294
295 if (rt->rt_flags&RTCF_BROADCAST) {
296 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
297 if (newskb)
Jan Engelhardt9bbc7682010-03-23 04:07:29 +0100298 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
299 NULL, newskb->dev, ip_dev_loopback_xmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700300 }
301
Jan Engelhardt9bbc7682010-03-23 04:07:29 +0100302 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
303 skb->dev, ip_finish_output,
Patrick McHardy48d5cad2006-02-15 15:10:22 -0800304 !(IPCB(skb)->flags & IPSKB_REROUTED));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305}
306
307int ip_output(struct sk_buff *skb)
308{
Eric Dumazetadf30902009-06-02 05:19:30 +0000309 struct net_device *dev = skb_dst(skb)->dev;
Patrick McHardy1bd9bef2006-01-05 12:20:59 -0800310
Neil Hormanedf391f2009-04-27 02:45:02 -0700311 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700312
Patrick McHardy1bd9bef2006-01-05 12:20:59 -0800313 skb->dev = dev;
314 skb->protocol = htons(ETH_P_IP);
315
Jan Engelhardt9bbc7682010-03-23 04:07:29 +0100316 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900317 ip_finish_output,
Patrick McHardy48d5cad2006-02-15 15:10:22 -0800318 !(IPCB(skb)->flags & IPSKB_REROUTED));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700319}
320
David S. Millerd9d8da82011-05-06 22:23:20 -0700321int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700322{
David S. Millere89862f2007-01-26 01:04:55 -0800323 struct sock *sk = skb->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324 struct inet_sock *inet = inet_sk(sk);
Eric Dumazetf6d8bd02011-04-21 09:45:37 +0000325 struct ip_options_rcu *inet_opt;
David S. Millerb57ae012011-05-06 16:24:06 -0700326 struct flowi4 *fl4;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327 struct rtable *rt;
328 struct iphdr *iph;
Eric Dumazetab6e3fe2010-05-10 11:31:49 +0000329 int res;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330
331 /* Skip all of this if the packet is already routed,
332 * f.e. by something like SCTP.
333 */
Eric Dumazetab6e3fe2010-05-10 11:31:49 +0000334 rcu_read_lock();
Eric Dumazetf6d8bd02011-04-21 09:45:37 +0000335 inet_opt = rcu_dereference(inet->inet_opt);
David S. Millerea4fc0d2011-05-06 22:30:20 -0700336 fl4 = &fl->u.ip4;
Eric Dumazet511c3f92009-06-02 05:14:27 +0000337 rt = skb_rtable(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338 if (rt != NULL)
339 goto packet_routed;
340
341 /* Make sure we can route this packet. */
342 rt = (struct rtable *)__sk_dst_check(sk, 0);
343 if (rt == NULL) {
Al Viro3ca3c682006-09-27 18:28:07 -0700344 __be32 daddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345
346 /* Use correct destination address if we have options. */
Eric Dumazetc720c7e2009-10-15 06:30:45 +0000347 daddr = inet->inet_daddr;
Eric Dumazetf6d8bd02011-04-21 09:45:37 +0000348 if (inet_opt && inet_opt->opt.srr)
349 daddr = inet_opt->opt.faddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350
David S. Miller78fbfd82011-03-12 00:00:52 -0500351 /* If this fails, retransmit mechanism of transport layer will
352 * keep trying until route appears or the connection times
353 * itself out.
354 */
David S. Millerb57ae012011-05-06 16:24:06 -0700355 rt = ip_route_output_ports(sock_net(sk), fl4, sk,
David S. Miller78fbfd82011-03-12 00:00:52 -0500356 daddr, inet->inet_saddr,
357 inet->inet_dport,
358 inet->inet_sport,
359 sk->sk_protocol,
360 RT_CONN_FLAGS(sk),
361 sk->sk_bound_dev_if);
362 if (IS_ERR(rt))
363 goto no_route;
Changli Gaod8d1f302010-06-10 23:31:35 -0700364 sk_setup_caps(sk, &rt->dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700365 }
Changli Gaod8d1f302010-06-10 23:31:35 -0700366 skb_dst_set_noref(skb, &rt->dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700367
368packet_routed:
David S. Millerea4fc0d2011-05-06 22:30:20 -0700369 if (inet_opt && inet_opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700370 goto no_route;
371
372 /* OK, we know where to send it, allocate and build IP header. */
Eric Dumazetf6d8bd02011-04-21 09:45:37 +0000373 skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
Arnaldo Carvalho de Melo8856dfa2007-03-10 19:40:39 -0300374 skb_reset_network_header(skb);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -0700375 iph = ip_hdr(skb);
Al Viro714e85b2006-11-14 20:51:49 -0800376 *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
Changli Gaod8d1f302010-06-10 23:31:35 -0700377 if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378 iph->frag_off = htons(IP_DF);
379 else
380 iph->frag_off = 0;
Changli Gaod8d1f302010-06-10 23:31:35 -0700381 iph->ttl = ip_select_ttl(inet, &rt->dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700382 iph->protocol = sk->sk_protocol;
David S. Millerea4fc0d2011-05-06 22:30:20 -0700383 iph->saddr = fl4->saddr;
384 iph->daddr = fl4->daddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385 /* Transport layer set skb->h.foo itself. */
386
Eric Dumazetf6d8bd02011-04-21 09:45:37 +0000387 if (inet_opt && inet_opt->opt.optlen) {
388 iph->ihl += inet_opt->opt.optlen >> 2;
389 ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700390 }
391
Changli Gaod8d1f302010-06-10 23:31:35 -0700392 ip_select_ident_more(iph, &rt->dst, sk,
Herbert Xu79671682006-06-22 02:40:14 -0700393 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700394
Linus Torvalds1da177e2005-04-16 15:20:36 -0700395 skb->priority = sk->sk_priority;
Laszlo Attila Toth4a19ec52008-01-30 19:08:16 -0800396 skb->mark = sk->sk_mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397
Eric Dumazetab6e3fe2010-05-10 11:31:49 +0000398 res = ip_local_out(skb);
399 rcu_read_unlock();
400 return res;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700401
402no_route:
Eric Dumazetab6e3fe2010-05-10 11:31:49 +0000403 rcu_read_unlock();
Pavel Emelyanov5e38e272008-07-16 20:19:49 -0700404 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405 kfree_skb(skb);
406 return -EHOSTUNREACH;
407}
Eric Dumazet4bc2f182010-07-09 21:22:10 +0000408EXPORT_SYMBOL(ip_queue_xmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409
410
411static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
412{
413 to->pkt_type = from->pkt_type;
414 to->priority = from->priority;
415 to->protocol = from->protocol;
Eric Dumazetadf30902009-06-02 05:19:30 +0000416 skb_dst_drop(to);
Eric Dumazetfe76cda2010-07-01 23:48:22 +0000417 skb_dst_copy(to, from);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418 to->dev = from->dev;
Thomas Graf82e91ff2006-11-09 15:19:14 -0800419 to->mark = from->mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700420
421 /* Copy the flags to each fragment. */
422 IPCB(to)->flags = IPCB(from)->flags;
423
424#ifdef CONFIG_NET_SCHED
425 to->tc_index = from->tc_index;
426#endif
Yasuyuki Kozakaie7ac05f2007-03-14 16:44:01 -0700427 nf_copy(to, from);
Jozsef Kadlecsikba9dda32007-07-07 22:21:23 -0700428#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
429 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
430 to->nf_trace = from->nf_trace;
431#endif
Julian Anastasovc98d80e2005-10-22 13:39:21 +0300432#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
433 to->ipvs_property = from->ipvs_property;
434#endif
James Morris984bc162006-06-09 00:29:17 -0700435 skb_copy_secmark(to, from);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700436}
437
438/*
439 * This IP datagram is too large to be sent in one piece. Break it up into
440 * smaller pieces (each of size equal to IP header plus
441 * a block of the data of the original IP data part) that will yet fit in a
442 * single device frame, and queue such a frame for sending.
443 */
444
Jianjun Kongd93191002008-11-03 00:23:42 -0800445int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700446{
447 struct iphdr *iph;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700448 int ptr;
449 struct net_device *dev;
450 struct sk_buff *skb2;
Changli Gaoc893b802010-07-31 13:25:08 +0000451 unsigned int mtu, hlen, left, len, ll_rs;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700452 int offset;
Alexey Dobriyan76ab6082006-01-06 13:24:29 -0800453 __be16 not_last_frag;
Eric Dumazet511c3f92009-06-02 05:14:27 +0000454 struct rtable *rt = skb_rtable(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700455 int err = 0;
456
Changli Gaod8d1f302010-06-10 23:31:35 -0700457 dev = rt->dst.dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700458
459 /*
460 * Point into the IP datagram header.
461 */
462
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -0700463 iph = ip_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700464
465 if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
Pavel Emelyanov5e38e272008-07-16 20:19:49 -0700466 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700467 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
John Heffner628a5c52007-04-20 15:53:27 -0700468 htonl(ip_skb_dst_mtu(skb)));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700469 kfree_skb(skb);
470 return -EMSGSIZE;
471 }
472
473 /*
474 * Setup starting values.
475 */
476
477 hlen = iph->ihl * 4;
Changli Gaod8d1f302010-06-10 23:31:35 -0700478 mtu = dst_mtu(&rt->dst) - hlen; /* Size of data space */
Bart De Schuymer6c79bf02010-04-20 16:22:01 +0200479#ifdef CONFIG_BRIDGE_NETFILTER
480 if (skb->nf_bridge)
481 mtu -= nf_bridge_mtu_reduction(skb);
482#endif
Herbert Xu89cee8b2005-12-13 23:14:27 -0800483 IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700484
485 /* When frag_list is given, use it. First, check its validity:
486 * some transformers could create wrong frag_list or break existing
487 * one, it is not prohibited. In this case fall back to copying.
488 *
489 * LATER: this step can be merged to real generation of fragments,
490 * we can switch to copy when see the first bad fragment.
491 */
David S. Miller21dc3302010-08-23 00:13:46 -0700492 if (skb_has_frag_list(skb)) {
Eric Dumazet3d130082010-09-21 08:47:45 +0000493 struct sk_buff *frag, *frag2;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700494 int first_len = skb_pagelen(skb);
495
496 if (first_len - hlen > mtu ||
497 ((first_len - hlen) & 7) ||
Paul Gortmaker56f8a752011-06-21 20:33:34 -0700498 ip_is_fragment(iph) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -0700499 skb_cloned(skb))
500 goto slow_path;
501
David S. Millerd7fcf1a2009-06-09 00:19:37 -0700502 skb_walk_frags(skb, frag) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503 /* Correct geometry. */
504 if (frag->len > mtu ||
505 ((frag->len & 7) && frag->next) ||
506 skb_headroom(frag) < hlen)
Eric Dumazet3d130082010-09-21 08:47:45 +0000507 goto slow_path_clean;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700508
509 /* Partially cloned skb? */
510 if (skb_shared(frag))
Eric Dumazet3d130082010-09-21 08:47:45 +0000511 goto slow_path_clean;
Herbert Xu2fdba6b2005-05-18 22:52:33 -0700512
513 BUG_ON(frag->sk);
514 if (skb->sk) {
Herbert Xu2fdba6b2005-05-18 22:52:33 -0700515 frag->sk = skb->sk;
516 frag->destructor = sock_wfree;
Herbert Xu2fdba6b2005-05-18 22:52:33 -0700517 }
Eric Dumazet3d130082010-09-21 08:47:45 +0000518 skb->truesize -= frag->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700519 }
520
521 /* Everything is OK. Generate! */
522
523 err = 0;
524 offset = 0;
525 frag = skb_shinfo(skb)->frag_list;
David S. Millerd7fcf1a2009-06-09 00:19:37 -0700526 skb_frag_list_init(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700527 skb->data_len = first_len - skb_headlen(skb);
528 skb->len = first_len;
529 iph->tot_len = htons(first_len);
530 iph->frag_off = htons(IP_MF);
531 ip_send_check(iph);
532
533 for (;;) {
534 /* Prepare header of the next frame,
535 * before previous one went down. */
536 if (frag) {
537 frag->ip_summed = CHECKSUM_NONE;
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -0300538 skb_reset_transport_header(frag);
Arnaldo Carvalho de Meloe2d1bca2007-04-10 20:46:21 -0700539 __skb_push(frag, hlen);
540 skb_reset_network_header(frag);
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -0700541 memcpy(skb_network_header(frag), iph, hlen);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -0700542 iph = ip_hdr(frag);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700543 iph->tot_len = htons(frag->len);
544 ip_copy_metadata(frag, skb);
545 if (offset == 0)
546 ip_options_fragment(frag);
547 offset += skb->len - hlen;
548 iph->frag_off = htons(offset>>3);
549 if (frag->next != NULL)
550 iph->frag_off |= htons(IP_MF);
551 /* Ready, complete checksum */
552 ip_send_check(iph);
553 }
554
555 err = output(skb);
556
Wei Dongdafee492006-08-02 13:41:21 -0700557 if (!err)
Pavel Emelyanov5e38e272008-07-16 20:19:49 -0700558 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700559 if (err || !frag)
560 break;
561
562 skb = frag;
563 frag = skb->next;
564 skb->next = NULL;
565 }
566
567 if (err == 0) {
Pavel Emelyanov5e38e272008-07-16 20:19:49 -0700568 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700569 return 0;
570 }
571
572 while (frag) {
573 skb = frag->next;
574 kfree_skb(frag);
575 frag = skb;
576 }
Pavel Emelyanov5e38e272008-07-16 20:19:49 -0700577 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700578 return err;
Eric Dumazet3d130082010-09-21 08:47:45 +0000579
580slow_path_clean:
581 skb_walk_frags(skb, frag2) {
582 if (frag2 == frag)
583 break;
584 frag2->sk = NULL;
585 frag2->destructor = NULL;
586 skb->truesize += frag2->truesize;
587 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700588 }
589
590slow_path:
591 left = skb->len - hlen; /* Space per frame */
George Kadianakis49085bd2010-07-06 11:44:12 +0000592 ptr = hlen; /* Where to start from */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700593
Linus Torvalds1da177e2005-04-16 15:20:36 -0700594 /* for bridged IP traffic encapsulated inside f.e. a vlan header,
Stephen Hemminger9bcfcaf2006-08-29 17:48:57 -0700595 * we need to make room for the encapsulating header
596 */
Changli Gaoc893b802010-07-31 13:25:08 +0000597 ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));
Stephen Hemminger9bcfcaf2006-08-29 17:48:57 -0700598
Linus Torvalds1da177e2005-04-16 15:20:36 -0700599 /*
600 * Fragment the datagram.
601 */
602
603 offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
604 not_last_frag = iph->frag_off & htons(IP_MF);
605
606 /*
607 * Keep copying data until we run out.
608 */
609
Stephen Hemminger132adf52007-03-08 20:44:43 -0800610 while (left > 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700611 len = left;
612 /* IF: it doesn't fit, use 'mtu' - the data space left */
613 if (len > mtu)
614 len = mtu;
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300615 /* IF: we are not sending up to and including the packet end
Linus Torvalds1da177e2005-04-16 15:20:36 -0700616 then align the next start on an eight byte boundary */
617 if (len < left) {
618 len &= ~7;
619 }
620 /*
621 * Allocate buffer.
622 */
623
624 if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
Patrick McHardy64ce2072005-08-09 20:50:53 -0700625 NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700626 err = -ENOMEM;
627 goto fail;
628 }
629
630 /*
631 * Set up data on packet
632 */
633
634 ip_copy_metadata(skb2, skb);
635 skb_reserve(skb2, ll_rs);
636 skb_put(skb2, len + hlen);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -0700637 skb_reset_network_header(skb2);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -0700638 skb2->transport_header = skb2->network_header + hlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700639
640 /*
641 * Charge the memory for the fragment to any owner
642 * it might possess
643 */
644
645 if (skb->sk)
646 skb_set_owner_w(skb2, skb->sk);
647
648 /*
649 * Copy the packet header into the new buffer.
650 */
651
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -0300652 skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700653
654 /*
655 * Copy a block of the IP datagram.
656 */
Arnaldo Carvalho de Melobff9b612007-03-16 17:19:57 -0300657 if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700658 BUG();
659 left -= len;
660
661 /*
662 * Fill in the new header fields.
663 */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -0700664 iph = ip_hdr(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700665 iph->frag_off = htons((offset >> 3));
666
667 /* ANK: dirty, but effective trick. Upgrade options only if
668 * the segment to be fragmented was THE FIRST (otherwise,
669 * options are already fixed) and make it ONCE
670 * on the initial skb, so that all the following fragments
671 * will inherit fixed options.
672 */
673 if (offset == 0)
674 ip_options_fragment(skb);
675
676 /*
677 * Added AC : If we are fragmenting a fragment that's not the
678 * last fragment then keep MF on each bit
679 */
680 if (left > 0 || not_last_frag)
681 iph->frag_off |= htons(IP_MF);
682 ptr += len;
683 offset += len;
684
685 /*
686 * Put this fragment into the sending queue.
687 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688 iph->tot_len = htons(len + hlen);
689
690 ip_send_check(iph);
691
692 err = output(skb2);
693 if (err)
694 goto fail;
Wei Dongdafee492006-08-02 13:41:21 -0700695
Pavel Emelyanov5e38e272008-07-16 20:19:49 -0700696 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700697 }
698 kfree_skb(skb);
Pavel Emelyanov5e38e272008-07-16 20:19:49 -0700699 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700700 return err;
701
702fail:
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +0900703 kfree_skb(skb);
Pavel Emelyanov5e38e272008-07-16 20:19:49 -0700704 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700705 return err;
706}
Patrick McHardy2e2f7ae2006-04-04 13:42:35 -0700707EXPORT_SYMBOL(ip_fragment);
708
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709int
710ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
711{
712 struct iovec *iov = from;
713
Patrick McHardy84fa7932006-08-29 16:44:56 -0700714 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700715 if (memcpy_fromiovecend(to, iov, offset, len) < 0)
716 return -EFAULT;
717 } else {
Al Viro44bb9362006-11-14 21:36:14 -0800718 __wsum csum = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700719 if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
720 return -EFAULT;
721 skb->csum = csum_block_add(skb->csum, csum, odd);
722 }
723 return 0;
724}
Eric Dumazet4bc2f182010-07-09 21:22:10 +0000725EXPORT_SYMBOL(ip_generic_getfrag);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700726
Al Viro44bb9362006-11-14 21:36:14 -0800727static inline __wsum
Linus Torvalds1da177e2005-04-16 15:20:36 -0700728csum_page(struct page *page, int offset, int copy)
729{
730 char *kaddr;
Al Viro44bb9362006-11-14 21:36:14 -0800731 __wsum csum;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700732 kaddr = kmap(page);
733 csum = csum_partial(kaddr + offset, copy, 0);
734 kunmap(page);
735 return csum;
736}
737
/*
 * ip_ufo_append_data - append data for a UDP-fragmentation-offload
 * capable device.
 *
 * Instead of building one skb per IP fragment, a single large skb is
 * built (or the existing tail skb of @queue reused) and marked
 * SKB_GSO_UDP so the device/GSO layer performs the fragmentation.
 * gso_size is set to the per-fragment payload size
 * (maxfraglen - fragheaderlen).
 *
 * Returns 0 on success or a negative errno from allocation or the
 * @getfrag copy.
 */
static inline int ip_ufo_append_data(struct sock *sk,
			struct sk_buff_head *queue,
			int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int maxfraglen, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(queue)) == NULL) {
		/* NOTE(review): the extra 20 bytes of slack beyond the
		 * header lengths look like headroom fuzz — confirm. */
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		/* checksum is completed by hardware / GSO */
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(queue, skb);
	}

	/* Payload (minus the transport header already accounted for)
	 * goes into page fragments of the single large skb. */
	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}
784
/*
 * __ip_append_data - append data to the queue of pending IP fragments
 * described by @cork.
 *
 * Data is pulled in through the @getfrag callback.  The tail skb of
 * @queue is grown while it has room; otherwise new skbs sized to
 * maxfraglen are allocated so that each queued skb becomes one
 * ready-to-send IP fragment once __ip_make_skb() prepends the header.
 * @transhdrlen > 0 marks the first call for a datagram (transport
 * header still to be written by the caller's getfrag/transport code).
 *
 * Returns 0 on success or a negative errno; on error the bytes that
 * were not appended are subtracted back out of cork->length.
 */
static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;

	skb = skb_peek_tail(queue);

	/* Extension (e.g. IPsec) header room is only charged on the
	 * very first skb of the datagram. */
	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->fragsize;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	/* Fragment payload must be a multiple of 8 bytes, hence & ~7. */
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	/* IP total length field is 16 bits: reject oversize datagrams. */
	if (cork->length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & NETIF_F_V4_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	cork->length += length;
	/* UFO fast path: let the device fragment one large UDP skb. */
	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
					 hh_len, fragheaderlen, transhdrlen,
					 maxfraglen, flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use calculated fragment length to generate chained skb,
	 * each of segments is IP fragment ready for sending to network after
	 * adding appropriate IP header.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			/* Bytes beyond the 8-byte-aligned fragment
			 * boundary must migrate to the new skb. */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = fraglen;

			alloclen += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			if (transhdrlen) {
				/* First fragment: may block per MSG_DONTWAIT. */
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				/* Subsequent fragments: bounded by twice
				 * the send buffer, never blocks. */
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else
					/* only the initial fragment is
					   time stamped */
					cork->tx_flags = 0;
			}
			if (skb == NULL)
				goto error;

			/*
			 * Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);
			skb_shinfo(skb)->tx_flags = cork->tx_flags;

			/*
			 * Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen + exthdrlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				/* Move the overhang from the previous skb,
				 * adjusting both skbs' checksums. */
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			/* Header room and offload mode apply only to the
			 * first fragment. */
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			/* Linear append into the existing tail skb. */
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			/* Scatter-gather: append into page fragments,
			 * reusing the current cork page while it has room. */
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = cork->page;
			int off = cork->off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL)  {
					err = -ENOMEM;
					goto error;
				}
				cork->page = page;
				cork->off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			cork->off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}
1039
/*
 * ip_setup_cork - initialise @cork for a new corked datagram.
 *
 * Copies the caller's IP options (if any) into a cork-owned buffer and
 * steals the route reference from @rtp (the caller must not release it
 * afterwards; *rtp is cleared to enforce this).
 *
 * Returns 0 on success, -ENOBUFS on option allocation failure, or
 * -EFAULT if no route was supplied.
 */
static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_rcu *opt;
	struct rtable *rt;

	/*
	 * setup for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (cork->opt == NULL) {
			/* 40 == maximum size of the IP options area. */
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(cork->opt == NULL))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}
	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;
	/*
	 * We steal reference to this route, caller should not release it
	 */
	*rtp = NULL;
	/* With IP_PMTUDISC_PROBE, fragment on the device MTU rather than
	 * the (possibly smaller) cached path MTU. */
	cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
			 rt->dst.dev->mtu : dst_mtu(&rt->dst);
	cork->dst = &rt->dst;
	cork->length = 0;
	cork->tx_flags = ipc->tx_flags;
	cork->page = NULL;
	cork->off = 0;

	return 0;
}
1079
/*
 *	ip_append_data() and ip_append_page() can build one large IP datagram
 *	from many pieces of data.  Each piece is held on the socket
 *	until ip_push_pending_frames() is called.  Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, but other transport protocols - e.g. raw sockets -
 *	can potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
David S. Millerf5fca602011-05-08 17:24:10 -07001091int ip_append_data(struct sock *sk, struct flowi4 *fl4,
Herbert Xu1470ddf2011-03-01 02:36:47 +00001092 int getfrag(void *from, char *to, int offset, int len,
1093 int odd, struct sk_buff *skb),
1094 void *from, int length, int transhdrlen,
1095 struct ipcm_cookie *ipc, struct rtable **rtp,
1096 unsigned int flags)
1097{
1098 struct inet_sock *inet = inet_sk(sk);
1099 int err;
1100
1101 if (flags&MSG_PROBE)
1102 return 0;
1103
1104 if (skb_queue_empty(&sk->sk_write_queue)) {
David S. Millerbdc712b2011-05-06 15:02:07 -07001105 err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
Herbert Xu1470ddf2011-03-01 02:36:47 +00001106 if (err)
1107 return err;
1108 } else {
1109 transhdrlen = 0;
1110 }
1111
David S. Millerf5fca602011-05-08 17:24:10 -07001112 return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, getfrag,
Herbert Xu1470ddf2011-03-01 02:36:47 +00001113 from, length, transhdrlen, flags);
1114}
1115
/*
 * ip_append_page - append a region of @page (zero-copy) to the
 * socket's pending datagram previously started with ip_append_data().
 *
 * Requires a scatter-gather capable device and an already non-empty
 * write queue.  The page is referenced, not copied; for non-offloaded
 * skbs its checksum is folded into skb->csum via csum_page().
 *
 * Returns 0 on success or a negative errno; on error the bytes not
 * appended are subtracted back out of cork->length.
 */
ssize_t	ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	struct inet_cork *cork;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	/* Raw IP_HDRINCL sockets build their own headers; refuse. */
	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	cork = &inet->cork.base;
	rt = (struct rtable *)cork->dst;
	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (!(rt->dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = cork->fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	/* Fragment payload must be a multiple of 8 bytes, hence & ~7. */
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	/* IP total length field is 16 bits: reject oversize datagrams. */
	if (cork->length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	cork->length += size;
	/* Large UDP sends on UFO devices: mark tail skb for offload. */
	if ((size + skb->len > mtu) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}


	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {

			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			/* Current fragment is full: start a new skb and
			 * migrate the overhang past maxfraglen into it. */
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 * Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 * Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				/* Move overhang bytes, fixing up both
				 * skbs' software checksums. */
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			/* Contiguous with the last fragment: just grow it. */
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		atomic_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	cork->length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}
1260
Herbert Xu1470ddf2011-03-01 02:36:47 +00001261static void ip_cork_release(struct inet_cork *cork)
Pavel Emelyanov429f08e2007-11-05 21:03:24 -08001262{
Herbert Xu1470ddf2011-03-01 02:36:47 +00001263 cork->flags &= ~IPCORK_OPT;
1264 kfree(cork->opt);
1265 cork->opt = NULL;
1266 dst_release(cork->dst);
1267 cork->dst = NULL;
Pavel Emelyanov429f08e2007-11-05 21:03:24 -08001268}
1269
/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push it out.
 */
/*
 * __ip_make_skb - collapse all skbs queued on @queue into one datagram
 * and write its IP header.
 *
 * The first skb becomes the head; the rest are chained onto its
 * frag_list (their destructor/sk are cleared so the head owns them).
 * The header fields are filled from the socket, the cork and @fl4;
 * the route reference is stolen from cork->dst into the skb, and the
 * cork is released.
 *
 * Returns the finished skb, or NULL if the queue was empty.
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)cork->dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;

	if ((skb = __skb_dequeue(queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		/* Head skb now owns the chained skbs' memory accounting. */
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here. No matter, what transforms
	 * how transforms change size of the packet, it will come out.
	 */
	if (inet->pmtudisc < IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	/* Build the IPv4 header in the headroom created earlier. */
	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	iph->frag_off = df;
	ip_select_ident(iph, &rt->dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = fl4->saddr;
	iph->daddr = fl4->daddr;

	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, cork->addr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	ip_cork_release(cork);
out:
	return skb;
}
1363
1364int ip_send_skb(struct sk_buff *skb)
1365{
1366 struct net *net = sock_net(skb->sk);
1367 int err;
1368
Herbert Xuc439cb22008-01-11 19:14:00 -08001369 err = ip_local_out(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370 if (err) {
1371 if (err > 0)
Eric Dumazet6ce9e7b2009-09-02 18:05:33 -07001372 err = net_xmit_errno(err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373 if (err)
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001374 IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375 }
1376
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378}
1379
David S. Miller77968b72011-05-08 17:12:19 -07001380int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
Herbert Xu1470ddf2011-03-01 02:36:47 +00001381{
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001382 struct sk_buff *skb;
1383
David S. Miller77968b72011-05-08 17:12:19 -07001384 skb = ip_finish_skb(sk, fl4);
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001385 if (!skb)
1386 return 0;
1387
1388 /* Netfilter gets whole the not fragmented skb. */
1389 return ip_send_skb(skb);
Herbert Xu1470ddf2011-03-01 02:36:47 +00001390}
1391
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392/*
1393 * Throw away all pending data on the socket.
1394 */
Herbert Xu1470ddf2011-03-01 02:36:47 +00001395static void __ip_flush_pending_frames(struct sock *sk,
1396 struct sk_buff_head *queue,
1397 struct inet_cork *cork)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399 struct sk_buff *skb;
1400
Herbert Xu1470ddf2011-03-01 02:36:47 +00001401 while ((skb = __skb_dequeue_tail(queue)) != NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402 kfree_skb(skb);
1403
Herbert Xu1470ddf2011-03-01 02:36:47 +00001404 ip_cork_release(cork);
1405}
1406
/* Discard everything pending on the socket's default write queue. */
void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}
1411
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001412struct sk_buff *ip_make_skb(struct sock *sk,
David S. Miller77968b72011-05-08 17:12:19 -07001413 struct flowi4 *fl4,
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001414 int getfrag(void *from, char *to, int offset,
1415 int len, int odd, struct sk_buff *skb),
1416 void *from, int length, int transhdrlen,
1417 struct ipcm_cookie *ipc, struct rtable **rtp,
1418 unsigned int flags)
1419{
David S. Millerb80d7222011-05-06 15:06:01 -07001420 struct inet_cork cork;
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001421 struct sk_buff_head queue;
1422 int err;
1423
1424 if (flags & MSG_PROBE)
1425 return NULL;
1426
1427 __skb_queue_head_init(&queue);
1428
David S. Millerb80d7222011-05-06 15:06:01 -07001429 cork.flags = 0;
1430 cork.addr = 0;
David S. Miller70652722011-05-06 16:01:15 -07001431 cork.opt = NULL;
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001432 err = ip_setup_cork(sk, &cork, ipc, rtp);
1433 if (err)
1434 return ERR_PTR(err);
1435
David S. Millerf5fca602011-05-08 17:24:10 -07001436 err = __ip_append_data(sk, fl4, &queue, &cork, getfrag,
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001437 from, length, transhdrlen, flags);
1438 if (err) {
1439 __ip_flush_pending_frames(sk, &queue, &cork);
1440 return ERR_PTR(err);
1441 }
1442
David S. Miller77968b72011-05-08 17:12:19 -07001443 return __ip_make_skb(sk, fl4, &queue, &cork);
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001444}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445
1446/*
1447 * Fetch data from kernel space and fill in checksum if needed.
1448 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001449static int ip_reply_glue_bits(void *dptr, char *to, int offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 int len, int odd, struct sk_buff *skb)
1451{
Al Viro50842052006-11-14 21:36:34 -08001452 __wsum csum;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453
1454 csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
1455 skb->csum = csum_block_add(skb->csum, csum, odd);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001456 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457}
1458
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001459/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 * Generic function to send a packet as reply to another packet.
1461 * Used to send TCP resets so far. ICMP should use this function too.
1462 *
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001463 * Should run single threaded per socket because it uses the sock
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464 * structure to pass arguments.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465 */
David S. Miller0a5ebb82011-05-09 13:22:43 -07001466void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
1467 struct ip_reply_arg *arg, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468{
1469 struct inet_sock *inet = inet_sk(sk);
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001470 struct ip_options_data replyopts;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 struct ipcm_cookie ipc;
David S. Miller77968b72011-05-08 17:12:19 -07001472 struct flowi4 fl4;
Eric Dumazet511c3f92009-06-02 05:14:27 +00001473 struct rtable *rt = skb_rtable(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001475 if (ip_options_echo(&replyopts.opt.opt, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 return;
1477
David S. Miller0a5ebb82011-05-09 13:22:43 -07001478 ipc.addr = daddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 ipc.opt = NULL;
Oliver Hartkopp2244d072010-08-17 08:59:14 +00001480 ipc.tx_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001482 if (replyopts.opt.opt.optlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483 ipc.opt = &replyopts.opt;
1484
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001485 if (replyopts.opt.opt.srr)
1486 daddr = replyopts.opt.opt.faddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 }
1488
David S. Miller77968b72011-05-08 17:12:19 -07001489 flowi4_init_output(&fl4, arg->bound_dev_if, 0,
1490 RT_TOS(ip_hdr(skb)->tos),
1491 RT_SCOPE_UNIVERSE, sk->sk_protocol,
1492 ip_reply_arg_flowi_flags(arg),
1493 daddr, rt->rt_spec_dst,
1494 tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
1495 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
1496 rt = ip_route_output_key(sock_net(sk), &fl4);
1497 if (IS_ERR(rt))
1498 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499
1500 /* And let IP do all the hard work.
1501
1502 This chunk is not reenterable, hence spinlock.
1503 Note that it uses the fact, that this function is called
1504 with locally disabled BH and that sk cannot be already spinlocked.
1505 */
1506 bh_lock_sock(sk);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001507 inet->tos = ip_hdr(skb)->tos;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 sk->sk_priority = skb->priority;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001509 sk->sk_protocol = ip_hdr(skb)->protocol;
Patrick McHardyf0e48db2007-06-04 21:32:46 -07001510 sk->sk_bound_dev_if = arg->bound_dev_if;
David S. Millerf5fca602011-05-08 17:24:10 -07001511 ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
Eric Dumazet2e77d892008-11-24 15:52:46 -08001512 &ipc, &rt, MSG_DONTWAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
1514 if (arg->csumoffset >= 0)
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001515 *((__sum16 *)skb_transport_header(skb) +
1516 arg->csumoffset) = csum_fold(csum_add(skb->csum,
1517 arg->csum));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518 skb->ip_summed = CHECKSUM_NONE;
David S. Miller77968b72011-05-08 17:12:19 -07001519 ip_push_pending_frames(sk, &fl4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 }
1521
1522 bh_unlock_sock(sk);
1523
1524 ip_rt_put(rt);
1525}
1526
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527void __init ip_init(void)
1528{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 ip_rt_init();
1530 inet_initpeers();
1531
1532#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
1533 igmp_mc_proc_init();
1534#endif
1535}