/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case if packet not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

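/*
 * Finish building a locally generated packet and hand it to netfilter:
 * fill in the total length, compute the header checksum and run the
 * LOCAL_OUT hook.  nf_hook() returns 1 when the packet may continue,
 * which is why ip_local_out() only calls dst_output() in that case.
 */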
int __ip_local_out(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}

int ip_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(newskb));
	netif_rx_ni(newskb);
	return 0;
}

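/* Use the per-socket unicast TTL if one was set via IP_TTL, otherwise
 * fall back to the route's default hop limit.
 */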
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 *
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	ip_select_ident(iph, &rt->dst, sk);

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

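/*
 * Final step of output: make sure the skb has enough headroom for the
 * link-layer header, then transmit through the cached hardware header
 * if one exists, or ask the neighbour layer to resolve and send.
 */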
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}

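/* MTU to compare against for the fragmentation decision: with
 * IP_PMTUDISC_PROBE the device MTU is used instead of the (possibly
 * smaller) cached path MTU.
 */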
static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;

	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}

static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}

int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loopback not local frames,
		   which returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note, that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
				NULL, newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
			    skb->dev, ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

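/*
 * Transmit entry point for connection-oriented sockets such as TCP.
 * Reuses the route cached on the socket when it is still valid,
 * otherwise performs a fresh route lookup, then builds the IP header
 * and passes the packet to ip_local_out().
 */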
int ip_queue_xmit(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rcu_read_lock();
	rt = skb_rtable(skb);
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .mark = sk->sk_mark,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->inet_saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .flags = inet_sk_flowi_flags(sk),
					    .uli_u = { .ports =
						       { .sport = inet->inet_sport,
							 .dport = inet->inet_dport } } };

			/* If this fails, retransmit mechanism of transport layer will
			 * keep trying until route appears or the connection times
			 * itself out.
			 */
			security_sk_classify_flow(sk, &fl);
			if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
				goto no_route;
		}
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	/* Transport layer set skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);

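/* Copy the fields that every IP fragment must inherit from the
 * original skb: packet type, priority, device, mark, and the
 * netfilter and classification state.
 */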
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct iphdr *iph;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = skb_rtable(skb);
	int err = 0;

	dev = rt->dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(ip_skb_dst_mtu(skb)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->dst) - hlen;	/* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge)
		mtu -= nf_bridge_mtu_reduction(skb);
#endif
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when see the first bad fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

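	/* Slow path: allocate a fresh skb for each fragment and copy the
	 * data into it, instead of reusing the packet's own frag_list.
	 */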
slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/* for bridged IP traffic encapsulated inside f.e. a vlan header,
	 * we need to make room for the encapsulating header
	 */
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep MF on each bit
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_fragment);

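/*
 * Default getfrag callback for data coming from user-space iovecs:
 * copies 'len' bytes at 'offset' into the skb and, unless the hardware
 * will checksum the packet (CHECKSUM_PARTIAL), folds the copied bytes
 * into skb->csum as it goes.
 */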
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

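/*
 * UDP fragmentation offload: instead of fragmenting in software, queue
 * one large skb with gso_size set and let the UFO-capable device split
 * it into MTU-sized datagram fragments on transmit.
 */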
static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(&sk->sk_write_queue, skb);
	}

	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		rt = *rtp;
		if (unlikely(!rt))
			return -EFAULT;
		/*
		 * We steal reference to this route, caller should not release it
		 */
		*rtp = NULL;
		inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
					    rt->dst.dev->mtu :
					    dst_mtu(rt->dst.path);
		inet->cork.dst = &rt->dst;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->dst.header_len;
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = (struct rtable *)inet->cork.dst;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
			       mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & NETIF_F_V4_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	skb = skb_peek_tail(&sk->sk_write_queue);

	inet->cork.length += length;
	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
					 fragheaderlen, transhdrlen, mtu,
					 flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use calculated fragment length to generate chained skb,
	 * each of segments is IP fragment ready for sending to network after
	 * adding appropriate IP header.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = fraglen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap) {
				alloclen += rt->dst.trailer_len;
				/* make sure mtu is not reached */
				if (datalen > mtu - fragheaderlen - rt->dst.trailer_len)
					datalen -= ALIGN(rt->dst.trailer_len, 8);
			}
			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else
					/* only the initial fragment is
					   time stamped */
					ipc->tx_flags = 0;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);
			skb_shinfo(skb)->tx_flags = ipc->tx_flags;

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

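/*
 * Zero-copy companion to ip_append_data() used by sendpage(): the
 * caller's page is attached to the pending skb as a page fragment
 * instead of being copied, so the device must support scatter-gather.
 */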
ssize_t	ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = (struct rtable *)inet->cork.dst;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((size + skb->len > mtu) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {

			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		atomic_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

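/* Release the resources held while a socket is corked: the copied
 * options and the reference to the cached route.
 */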
static void ip_cork_release(struct inet_sock *inet)
{
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	dst_release(inet->cork.dst);
	inet->cork.dst = NULL;
}

/*
 *	Combine all pending IP fragments on the socket as one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)inet->cork.dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here. No matter how transforms
	 * change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc < IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->frag_off = df;
	ip_select_ident(iph, &rt->dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	inet->cork.dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	/* Netfilter gets the whole, not fragmented skb. */
	err = ip_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip_cork_release(inet);
	return err;

error:
	IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}

/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(inet_sk(sk));
}


/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far. ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options	opt;
		char			data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	__be32 daddr;
	struct rtable *rt = skb_rtable(skb);

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;
	ipc.tx_flags = 0;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .oif = arg->bound_dev_if,
				    .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(ip_hdr(skb)->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = tcp_hdr(skb)->dest,
						 .dport = tcp_hdr(skb)->source } },
				    .proto = sk->sk_protocol,
				    .flags = ip_reply_arg_flowi_flags(arg) };
		security_skb_classify_flow(skb, &fl);
		if (ip_route_output_key(sock_net(sk), &rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reenterable, hence spinlock.
	   Note that it uses the fact, that this function is called
	   with locally disabled BH and that sk cannot be already spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = ip_hdr(skb)->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, &rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(skb) +
			  arg->csumoffset) = csum_fold(csum_add(skb->csum,
								arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}