/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
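
/* A minimal standalone sketch (editor's addition, plain C, compiled out
 * via #if 0, helper name hypothetical): the RFC 1071 Internet checksum
 * that ip_fast_csum() above computes over the IP header.
 */
#if 0
#include <stdint.h>

static uint16_t example_ip_checksum(const void *hdr, int ihl)
{
	const uint16_t *p = hdr;
	uint32_t sum = 0;
	int words = ihl * 2;		/* ihl counts 32-bit words */

	while (words-- > 0)
		sum += *p++;		/* 16-bit one's-complement sum */
	while (sum >> 16)		/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;		/* checksum field must be 0 on input */
}
#endif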

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);
	netif_rx(newskb);
	return 0;
}

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	iph->tot_len  = htons(skb->len);
	ip_select_ident(iph, &rt->u.dst, sk);

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	/* Send it out. */
	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);
}

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}

static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;

	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
	       skb->dst->dev->mtu : dst_mtu(skb->dst);
}

static inline int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb->dst->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
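	/* GSO packets may legitimately exceed the path MTU here: they are
	 * segmented into MTU-sized frames later, when handed to the device,
	 * so only oversized non-GSO packets take the ip_fragment() path.
	 */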
	if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}

int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = (struct rtable*)skb->dst;
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that came back to us after forwarding; they would be
		   dropped by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
		) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
				newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dst->dev;

	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;

	/* Skip all of this if the packet is already routed,
	 * e.g. by something like SCTP.
	 */
	rt = (struct rtable *) skb->dst;
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		__be32 daddr;

		/* Use the correct destination address if we have options. */
		daddr = inet->daddr;
		if(opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .uli_u = { .ports =
						       { .sport = inet->sport,
							 .dport = inet->dport } } };

			/* If this fails, the retransmit mechanism of the
			 * transport layer will keep trying until the route
			 * appears or the connection times itself out.
			 */
			security_sk_classify_flow(sk, &fl);
			if (ip_route_output_flow(&rt, &fl, sk, 0))
				goto no_route;
		}
		sk_setup_caps(sk, &rt->u.dst);
	}
	skb->dst = dst_clone(&rt->u.dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
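	/* One 16-bit store fills version (4), header length (5 words) and
	 * TOS: 0x4500 | tos, converted to network byte order by htons().
	 */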
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	iph->tot_len = htons(skb->len);
	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	/* The transport layer has set skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->daddr, rt, 0);
	}

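	/* A GSO skb leaves the stack as gso_segs on-wire packets, so
	 * reserve that many consecutive IP IDs (this one plus gso_segs - 1
	 * extra) so that every emitted segment gets a unique ID.
	 */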
	ip_select_ident_more(iph, &rt->u.dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	/* Add an IP checksum. */
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);

no_route:
	IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}


static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each of size equal to the IP header plus a block
 *	of the data of the original IP data part) that will still fit in a
 *	single device frame, and queue such a frame for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs, pad;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = (struct rtable*)skb->dst;
	int err = 0;

	dev = rt->u.dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(ip_skb_dst_mtu(skb)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it.  First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; this is not prohibited.  In that case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first bad
	 * fragment.
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare the header of the next frame,
			 * before the previous one goes down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

	/* for bridged IP traffic encapsulated inside e.g. a vlan header,
	 * we need to make room for the encapsulating header
	 */
	pad = nf_bridge_pad(skb);
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad);
	mtu -= pad;

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep MF on each fragment
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}

EXPORT_SYMBOL(ip_fragment);
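
/* Editor's sketch (plain C, compiled out via #if 0, names hypothetical):
 * the slow-path geometry above in isolation.  frag_off carries the offset
 * in 8-byte units, so every fragment except the last must hold a payload
 * that is a multiple of 8 bytes.
 */
#if 0
#include <stdio.h>

static void example_fragment_geometry(int mtu_payload, int total_payload)
{
	int left = total_payload, offset = 0;

	while (left > 0) {
		int len = left;
		if (len > mtu_payload)		/* doesn't fit: use the */
			len = mtu_payload;	/* per-frame data space */
		if (len < left)			/* not the last fragment: */
			len &= ~7;		/* align on 8 bytes */
		printf("offset=%d (field %d) len=%d MF=%d\n",
		       offset, offset >> 3, len, left - len > 0);
		offset += len;
		left -= len;
	}
}
#endif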

int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* The network device supports UDP fragmentation offload, so
	 * create one single skb packet containing the complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for the hardware header */
		skb_reserve(skb, hh_len);

		/* create space for the UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize the network header pointer */
		skb_reset_network_header(skb);

		/* initialize the protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UFO,
	 * so follow the normal path
	 */
	kfree_skb(skb);
	return err;
}
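
/* Illustrative arithmetic (editor's note, not from the original source):
 * with a 1500-byte MTU and a 20-byte IP header, gso_size above is 1480,
 * so a 5000-byte UDP payload (5008 bytes including the UDP header) leaves
 * the device as four fragments: 3 x 1480 bytes plus one of 568 bytes.
 */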

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called.  Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can
 *	potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable *rt,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		dst_hold(&rt->u.dst);
		inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
					    rt->u.dst.dev->mtu :
					    dst_mtu(rt->u.dst.path);
		inet->cork.rt = rt;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = inet->cork.rt;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

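	/* Per-fragment geometry: every fragment repeats the IP header
	 * (fragheaderlen), and all but the last must carry a payload that
	 * is a multiple of 8 bytes, hence the "& ~7" below.
	 */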
	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it not to be fragmented later.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->u.dst.dev->features & NETIF_F_ALL_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
					 fragheaderlen, transhdrlen, mtu,
					 flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chained skb,
	 * each of whose segments is an IP fragment ready for sending to the
	 * network after adding an appropriate IP header.
	 */

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into the current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If the remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at the tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
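
/* Editor's sketch (plain C, compiled out via #if 0): the fragment
 * geometry computed at the top of ip_append_data().  With an assumed
 * 1500-byte MTU and no IP options, fragheaderlen = 20 and
 * maxfraglen = ((1500 - 20) & ~7) + 20 = 1500, i.e. 1480 payload bytes
 * per non-final fragment.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500;		/* assumed path MTU */
	unsigned int fragheaderlen = 20;	/* sizeof(struct iphdr), no options */
	unsigned int maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	printf("payload per fragment: %u\n", maxfraglen - fragheaderlen);
	return 0;
}
#endif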

ssize_t	ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = inet->cork.rt;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {

			/* Check if the remaining data fits into the current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}

/*
 *	Combine all pending IP fragments on the socket as one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = inet->cork.rt;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to the ip header from the ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
	 * allow fragmenting of the frame generated here.  No matter how
	 * transforms change the size of the packet, it will still go out.
	 */
	if (inet->pmtudisc < IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
	iph->frag_off = df;
	ip_select_ident(iph, &rt->u.dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	ip_send_check(iph);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	/* Netfilter gets the whole, unfragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = inet->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
	return err;

error:
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	goto out;
}

/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
}


/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far. ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options opt;
		char data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	__be32 daddr;
	struct rtable *rt = (struct rtable*)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(ip_hdr(skb)->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = tcp_hdr(skb)->dest,
						 .dport = tcp_hdr(skb)->source } },
				    .proto = sk->sk_protocol };
		security_skb_classify_flow(skb, &fl);
		if (ip_route_output_key(&rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reentrant, hence the spinlock.
	   Note that it uses the fact that this function is called
	   with locally disabled BH and that sk cannot be already spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = ip_hdr(skb)->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(skb) +
			  arg->csumoffset) = csum_fold(csum_add(skb->csum,
								arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);