/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen	:	Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);
	netif_rx(newskb);
	return 0;
}

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	iph->tot_len  = htons(skb->len);
	ip_select_ident(iph, &rt->u.dst, sk);

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	/* Send it out. */
	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);
}

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

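/*
 * Final step of transmission: make sure the skb has enough headroom
 * for the link-layer header, then hand it to the cached hardware
 * header (dst->hh) or to the neighbour's output function.
 */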
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}

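/*
 * Called once NF_IP_POST_ROUTING has accepted the packet: re-route it
 * if an xfrm policy was matched after SNAT, otherwise fragment
 * oversized non-GSO packets and pass them on to ip_finish_output2().
 */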
static inline int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb->dst->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}

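/*
 * Output path for multicast and broadcast packets: loop a copy back to
 * local listeners when required, drop TTL 0 multicasts, then pass the
 * original skb through the NF_IP_POST_ROUTING hook.
 */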
int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = (struct rtable *)skb->dst;
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
				newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

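/*
 * Standard unicast output path: account the packet, attach the output
 * device and protocol, and run NF_IP_POST_ROUTING (the hook is bypassed
 * for packets already re-routed after an xfrm policy lookup).
 */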
int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dst->dev;

	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

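/*
 * Transmit a packet on behalf of a connected socket (TCP uses this):
 * look up or reuse the socket's cached route, build the IP header and
 * hand the skb to NF_IP_LOCAL_OUT.  ipfragok lets the caller permit
 * fragmentation even when the socket asked for DF.
 */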
int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rt = (struct rtable *)skb->dst;
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .uli_u = { .ports =
						       { .sport = inet->sport,
							 .dport = inet->dport } } };

			/* If this fails, the retransmit mechanism of the
			 * transport layer will keep trying until the route
			 * appears or the connection times itself out.
			 */
			security_sk_classify_flow(sk, &fl);
			if (ip_route_output_flow(&rt, &fl, sk, 0))
				goto no_route;
		}
		sk_setup_caps(sk, &rt->u.dst);
	}
	skb->dst = dst_clone(&rt->u.dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	iph->tot_len = htons(skb->len);
	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	/* The transport layer sets skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->u.dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	/* Add an IP checksum. */
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);

no_route:
	IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}

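/*
 * Copy the per-packet metadata (device, priority, destination cache,
 * netfilter and scheduler state, etc.) from the original skb to a
 * freshly created fragment so that every fragment is handled like
 * the original packet.
 */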
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs, pad;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = (struct rtable *)skb->dst;
	int err = 0;

	dev = rt->u.dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(dst_mtu(&rt->u.dst)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; that is not prohibited.  In this case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first bad
	 * fragment.
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

	/* for bridged IP traffic encapsulated inside f.e. a vlan header,
	 * we need to make room for the encapsulating header
	 */
	pad = nf_bridge_pad(skb);
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad);
	mtu -= pad;

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->h.raw = skb2->nh.raw + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		memcpy(skb_network_header(skb2), skb->data, hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *	last fragment then keep MF on each fragment
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}

EXPORT_SYMBOL(ip_fragment);

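/*
 * getfrag() callback used by ip_append_data() for data held in user
 * space iovecs: copy the bytes and, unless the hardware will checksum
 * the packet, accumulate a partial checksum on the fly.
 */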
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

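/*
 * When the device supports UDP fragmentation offload (UFO), build one
 * large skb holding the complete UDP datagram in page fragments and
 * mark it GSO, leaving segmentation to the hardware.
 */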
static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by the network
	 * device, so create one single skb packet containing the complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->h.raw = skb->nh.raw + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UFO,
	 * so follow the normal path
	 */
	kfree_skb(skb);
	return err;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called.  Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable *rt,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		dst_hold(&rt->u.dst);
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		inet->cork.rt = rt;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = inet->cork.rt;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->u.dst.dev->features & NETIF_F_ALL_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
					 fragheaderlen, transhdrlen, mtu,
					 flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chained skb;
	 * each of the segments is an IP fragment ready for sending to the
	 * network after adding an appropriate IP header.
	 */

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			skb->h.raw = skb->nh.raw + fragheaderlen;
			data += fragheaderlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}

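/*
 * Append data that already lives in a page (e.g. from sendfile()) to
 * the socket's pending queue without copying it, growing or chaining
 * skbs in the same way as ip_append_data().
 */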
ssize_t ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = inet->cork.rt;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {

			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->h.raw = skb->nh.raw + fragheaderlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push it out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = inet->cork.rt;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
	 * allow fragmenting the frame generated here.  No matter how
	 * transforms change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc != IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
	iph->frag_off = df;
	ip_select_ident(iph, &rt->u.dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	ip_send_check(iph);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	/* Netfilter gets the whole, not yet fragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = inet->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
	return err;

error:
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	goto out;
}

/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
}

/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far. ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options opt;
		char data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	__be32 daddr;
	struct rtable *rt = (struct rtable *)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(ip_hdr(skb)->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = tcp_hdr(skb)->dest,
						 .dport = tcp_hdr(skb)->source } },
				    .proto = sk->sk_protocol };
		security_skb_classify_flow(skb, &fl);
		if (ip_route_output_key(&rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reentrant, hence the spinlock.
	   Note that it relies on the fact that this function is called
	   with BH disabled locally and that sk cannot already be spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = ip_hdr(skb)->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(skb) +
			  arg->csumoffset) = csum_fold(csum_add(skb->csum,
								arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);