/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					the output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *		Vitaly E. Lavrov :	Transparent proxy revived after year coma.
 *		Andi Kleen :		Replace ip_reply with ip_send_reply.
 *		Andi Kleen :		Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
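
/*
 * Illustrative sketch (not part of the original file): ip_fast_csum()
 * computes the standard RFC 1071 16-bit ones' complement sum over the
 * header, whose length is iph->ihl 32-bit words.  A portable (if slow)
 * equivalent could look like this:
 *
 *	static u16 example_ip_csum(const void *hdr, unsigned int ihl)
 *	{
 *		const u16 *p = hdr;
 *		u32 sum = 0;
 *		unsigned int i;
 *
 *		for (i = 0; i < ihl * 2; i++)	// ihl counts 32-bit words
 *			sum += p[i];
 *		while (sum >> 16)		// fold the carries back in
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return ~sum;			// ones' complement
 *	}
 *
 * The check field must be zeroed first, as ip_send_check() does above,
 * so that the old checksum does not corrupt the new sum.
 */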

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);
	netif_rx(newskb);
	return 0;
}

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}
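
/*
 * Example (illustrative): a socket that never set IP_TTL keeps uc_ttl
 * at its initial -1, so the TTL comes from the route's RTAX_HOPLIMIT
 * metric, which normally resolves to the ip_default_ttl sysctl
 * (IPDEFTTL, i.e. 64) declared above.
 */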

/*
 *		Add an ip header to a skbuff and send it out.
 *
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	iph->tot_len = htons(skb->len);
	ip_select_ident(iph, &rt->u.dst, sk);

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	/* Send it out. */
	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);
}

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
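
/*
 * Usage sketch (illustrative; error handling elided): a caller holding
 * a freshly routed skb whose data pointer is at the transport header,
 * with headroom reserved for the IP header, would do roughly:
 *
 *	// skb->dst must already point at a valid route
 *	err = ip_build_and_send_pkt(skb, sk, inet_sk(sk)->saddr,
 *				    inet_sk(sk)->daddr, inet_sk(sk)->opt);
 *
 * The function pushes and fills the IP header itself and then passes
 * the packet through the LOCAL_OUT hook to dst_output().
 */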

static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}

static inline int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb->dst->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}
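
/*
 * Orientation note (summary of the call chain in this file):
 * dst_output() invokes skb->dst->output, normally ip_output() (or
 * ip_mc_output() for multicast routes); that runs the POST_ROUTING
 * netfilter hook with ip_finish_output() as its continuation, which
 * fragments the packet if needed and hands each frame to
 * ip_finish_output2() for the link-layer header and transmission.
 */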

int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = (struct rtable*)skb->dst;
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
		) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
				newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dst->dev;

	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;

	/* Skip all of this if the packet is already routed,
	 * e.g. by something like SCTP.
	 */
	rt = (struct rtable *) skb->dst;
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		__be32 daddr;

		/* Use the correct destination address if we have options. */
		daddr = inet->daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .uli_u = { .ports =
						       { .sport = inet->sport,
							 .dport = inet->dport } } };

			/* If this fails, the retransmit mechanism of the
			 * transport layer will keep trying until a route
			 * appears or the connection times itself out.
			 */
			security_sk_classify_flow(sk, &fl);
			if (ip_route_output_flow(&rt, &fl, sk, 0))
				goto no_route;
		}
		sk_setup_caps(sk, &rt->u.dst);
	}
	skb->dst = dst_clone(&rt->u.dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
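	/* Fill version (4), header length (5 32-bit words) and TOS with a
	 * single 16-bit store: (4 << 12) | (5 << 8) packs the two 4-bit
	 * fields of the first byte, and the TOS lands in the second byte.
	 * E.g. with tos 0x10 this stores 0x4510, i.e. the familiar 0x45
	 * first octet of an option-less IPv4 header followed by the TOS.
	 */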
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	iph->tot_len = htons(skb->len);
	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	/* The transport layer sets skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->u.dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	/* Add an IP checksum. */
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);

no_route:
	IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}


static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	/* Connection association is the same as for the pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
	skb_copy_secmark(to, from);
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each of size equal to the IP header plus a
 *	block of the data of the original IP data part) that will yet fit
 *	in a single device frame, and queue such a frame for sending.
 */
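
/*
 * Worked example (illustrative numbers, not from the original source):
 * a 4000-byte datagram with a 20-byte header leaving over a 1500-byte
 * MTU has mtu = 1480 bytes of data space per fragment.  Fragment data
 * lengths must be multiples of 8 except for the last piece, so the
 * data splits as 1480 + 1480 + 1020 bytes, carried at 8-byte-unit
 * offsets 0, 185 and 370 in iph->frag_off, with IP_MF set on every
 * fragment but the last.
 */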

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs, pad;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = (struct rtable*)skb->dst;
	int err = 0;

	dev = rt->u.dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(dst_mtu(&rt->u.dst)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it.  First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; that is not prohibited.  In this case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first bad
	 * fragment.
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare the header of the next frame
			 * before the previous one goes down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

	/* for bridged IP traffic encapsulated inside e.g. a vlan header,
	 * we need to make room for the encapsulating header
	 */
	pad = nf_bridge_pad(skb);
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad);
	mtu -= pad;

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->h.raw = skb2->data + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		memcpy(skb_network_header(skb2), skb->data, hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep the MF bit set on each piece.
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}

EXPORT_SYMBOL(ip_fragment);

int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by network
	 * devices, so create one single skb packet containing the complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for the hardware header */
		skb_reserve(skb, hh_len);

		/* create space for the UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize the network header pointer */
		skb_reset_network_header(skb);

		/* initialize the protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UFO,
	 * so follow the normal path
	 */
	kfree_skb(skb);
	return err;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called.  Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can
 *	potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
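
/*
 * Usage sketch (illustrative only; the local variable names are
 * hypothetical): a datagram sender typically corks several pieces of
 * data into one IP datagram and then pushes it out:
 *
 *	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov,
 *			     payload_len, sizeof(struct udphdr),
 *			     &ipc, rt, msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk);
 *
 * With MSG_MORE, further ip_append_data()/ip_append_page() calls keep
 * growing the queued datagram; the final push builds the IP header and
 * hands the whole thing to netfilter and dst_output().
 */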

int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable *rt,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		dst_hold(&rt->u.dst);
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		inet->cork.rt = rt;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = inet->cork.rt;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
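
	/* Example (illustrative numbers): with an mtu of 1500 and no
	 * options, fragheaderlen is 20 and maxfraglen is
	 * ((1480 & ~7) + 20) = 1500; with 12 bytes of options it is
	 * ((1468 & ~7) + 32) = 1496, keeping every fragment's data
	 * length a multiple of 8.
	 */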

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it not to be fragmented later.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->u.dst.dev->features & NETIF_F_ALL_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
					 fragheaderlen, transhdrlen, mtu,
					 flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chained skb,
	 * each of whose segments is an IP fragment ready for sending to the
	 * network after adding an appropriate IP header.
	 */

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			skb->h.raw = skb->nh.raw + fragheaderlen;
			data += fragheaderlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}

ssize_t ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = inet->cork.rt;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {

			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			char *data;
			struct iphdr *iph;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			iph = ip_hdr(skb);
			data += fragheaderlen;
			skb->h.raw = data;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = inet->cork.rt;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to the ip header from the ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
	 * allow fragmenting of the frame generated here.  No matter how
	 * transforms change the size of the packet, it will go out.
	 */
	if (inet->pmtudisc != IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow this frame to be
	 * fragmented locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
	iph->frag_off = df;
	ip_select_ident(iph, &rt->u.dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	ip_send_check(iph);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	/* Netfilter gets the whole, not yet fragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = inet->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
	return err;

error:
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	goto out;
}

/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
}


/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as a reply to another packet.
 *	Used to send TCP resets so far.  ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
 */
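
/*
 * Illustrative sketch (hypothetical caller, not from this file): a TCP
 * reset path would fill a struct ip_reply_arg with its prebuilt TCP
 * header and let this function route and send it, roughly:
 *
 *	struct ip_reply_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.iov[0].iov_base = &rep_th;		// reply TCP header
 *	arg.iov[0].iov_len  = sizeof(rep_th);
 *	arg.csum = csum_partial(&rep_th, sizeof(rep_th), 0);
 *	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 *	ip_send_reply(ctl_sk, skb, &arg, sizeof(rep_th));
 *
 * csumoffset is counted in 16-bit words from skb->h.raw, which is why
 * the code below writes the folded checksum at
 * (__sum16 *)skb->h.raw + arg->csumoffset.
 */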

void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options opt;
		char data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	__be32 daddr;
	struct rtable *rt = (struct rtable*)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(ip_hdr(skb)->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = skb->h.th->dest,
						 .dport = skb->h.th->source } },
				    .proto = sk->sk_protocol };
		security_skb_classify_flow(skb, &fl);
		if (ip_route_output_key(&rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reentrant, hence the spinlock.
	   Note that it uses the fact that this function is called
	   with locally disabled BH and that sk cannot be already spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = ip_hdr(skb)->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);