/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case if packet not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen	:	Fix broken PMTU recovery and remove
 *					some redundant tests.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}

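/*
 * Finalize the IP header (total length and header checksum) and pass
 * the packet to the netfilter LOCAL_OUT hook; ip_local_out() then
 * continues into dst_output() when the hook verdict lets the packet
 * through (nf_hook() returning 1 means "okay to continue").
 */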
int __ip_local_out(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}

int ip_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(newskb));
	netif_rx(newskb);
	return 0;
}

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 *
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	ip_select_ident(iph, &rt->u.dst, sk);

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(skb);
}

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

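/*
 * Hand the finished datagram to the neighbour layer: bump the
 * multicast/broadcast counters, re-allocate headroom for the link-layer
 * header if the device needs more than we have, then emit through the
 * cached hardware header (dst->hh) or the neighbour output function.
 */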
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}

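/* MTU used for fragmentation decisions: the raw device MTU when the
 * sending socket probes path MTU itself (IP_PMTUDISC_PROBE), otherwise
 * the path MTU cached in the route. */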
static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;

	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}

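/*
 * Last step before the device: re-route when an xfrm policy attached
 * after SNAT, and fragment anything that exceeds the destination MTU
 * and is not a GSO packet (GSO packets are segmented further down the
 * stack).
 */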
static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}

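/*
 * Output path for multicast (and broadcast) packets: loop a clone back
 * to local listeners where required, enforce the ttl==0 "stay on this
 * host" rule, then queue the original through POST_ROUTING as usual.
 */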
int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames,
		   which returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note, that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
				NULL, newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
			    skb->dev, ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

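/*
 * Standard unicast output path: account the packet, point it at the
 * output device and run the POST_ROUTING hook; packets already marked
 * IPSKB_REROUTED have traversed the hook before and skip it here.
 */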
int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

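/*
 * Main transmit routine for connected sockets (typically TCP; callers
 * like SCTP pre-route the skb themselves and skip the lookup). Looks up
 * or revalidates the cached route, builds the IP header in front of the
 * transport header and sends via ip_local_out(). ipfragok permits
 * fragmentation even when PMTU settings would normally set DF.
 */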
int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rt = skb_rtable(skb);
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if(opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .mark = sk->sk_mark,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->inet_saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .flags = inet_sk_flowi_flags(sk),
					    .uli_u = { .ports =
						       { .sport = inet->inet_sport,
							 .dport = inet->inet_dport } } };

			/* If this fails, retransmit mechanism of transport layer will
			 * keep trying until route appears or the connection times
			 * itself out.
			 */
			security_sk_classify_flow(sk, &fl);
			if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
				goto no_route;
		}
		sk_setup_caps(sk, &rt->u.dst);
	}
	skb_dst_set(skb, dst_clone(&rt->u.dst));

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	/* Transport layer set skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->u.dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	return ip_local_out(skb);

no_route:
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}

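/* Propagate per-packet metadata (packet type, priority, route,
 * netfilter and scheduler state) from the original skb to a freshly
 * made fragment. */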
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs, pad;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = skb_rtable(skb);
	int err = 0;

	dev = rt->u.dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(ip_skb_dst_mtu(skb)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when we see the first bad fragment.
	 */
	if (skb_has_frags(skb)) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);
		int truesizes = 0;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			truesizes += frag->truesize;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->truesize -= truesizes;
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

	/* for bridged IP traffic encapsulated inside f.e. a vlan header,
	 * we need to make room for the encapsulating header
	 */
	pad = nf_bridge_pad(skb);
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad);
	mtu -= pad;

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep MF on each fragment
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
	return err;
}

EXPORT_SYMBOL(ip_fragment);

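/*
 * Standard getfrag callback for ip_append_data(): copy user iovec data
 * into the skb, computing the checksum on the fly unless the hardware
 * will checksum the packet (CHECKSUM_PARTIAL).
 */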
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}

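/* Checksum 'copy' bytes of a (possibly highmem) page via a temporary
 * kmap. */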
static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

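/*
 * UDP fragmentation offload fast path: instead of fragmenting in
 * software, build one oversized skb, mark it SKB_GSO_UDP with the
 * per-fragment gso_size, and let the device split it on transmit.
 */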
static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(&sk->sk_write_queue, skb);
	}

	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data. Each piece will be held on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		rt = *rtp;
		if (unlikely(!rt))
			return -EFAULT;
		/*
		 * We steal reference to this route, caller should not release it
		 */
		*rtp = NULL;
		inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
					    rt->u.dst.dev->mtu :
					    dst_mtu(rt->u.dst.path);
		inet->cork.dst = &rt->u.dst;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = (struct rtable *)inet->cork.dst;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
			       mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->u.dst.dev->features & NETIF_F_V4_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	inet->cork.length += length;
	if (((length > mtu) || !skb_queue_empty(&sk->sk_write_queue)) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
					 fragheaderlen, transhdrlen, mtu,
					 flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use calculated fragment length to generate chained skb,
	 * each of segments is IP fragment ready for sending to network after
	 * adding appropriate IP header.
	 */

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else
					/* only the initial fragment is
					   time stamped */
					ipc->shtx.flags = 0;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);
			*skb_tx(skb) = ipc->shtx;

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

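/*
 * Zero-copy variant of ip_append_data(): attach caller-supplied pages
 * directly to the pending skb's paged fragments. Requires a
 * scatter-gather capable route/device and a queue already started by
 * ip_append_data().
 */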
ssize_t ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = (struct rtable *)inet->cork.dst;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}


	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {

			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		atomic_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

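/* Release the resources held while a socket is corked: the saved IP
 * options and the route stolen in ip_append_data(). */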
static void ip_cork_release(struct inet_sock *inet)
{
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	dst_release(inet->cork.dst);
	inet->cork.dst = NULL;
}

/*
 *	Combine all pending IP fragments on the socket as one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)inet->cork.dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here. No matter how transforms
	 * change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc < IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->frag_off = df;
	ip_select_ident(iph, &rt->u.dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	inet->cork.dst = NULL;
	skb_dst_set(skb, &rt->u.dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	/* Netfilter gets the whole, unfragmented skb. */
	err = ip_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip_cork_release(inet);
	return err;

error:
	IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}

/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(inet_sk(sk));
}


/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far. ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options	opt;
		char			data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	__be32 daddr;
	struct rtable *rt = skb_rtable(skb);

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;
	ipc.shtx.flags = 0;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .oif = arg->bound_dev_if,
				    .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(ip_hdr(skb)->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = tcp_hdr(skb)->dest,
						 .dport = tcp_hdr(skb)->source } },
				    .proto = sk->sk_protocol,
				    .flags = ip_reply_arg_flowi_flags(arg) };
		security_skb_classify_flow(skb, &fl);
		if (ip_route_output_key(sock_net(sk), &rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reentrant, hence spinlock.
	   Note that it uses the fact that this function is called
	   with locally disabled BH and that sk cannot be already spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = ip_hdr(skb)->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, &rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(skb) +
			  arg->csumoffset) = csum_fold(csum_add(skb->csum,
								arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);