/* nf_nat_helper.c - generic support functions for NAT helpers
 *
 * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
 * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/checksum.h>
#include <net/tcp.h>

#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>

#define DUMP_OFFSET(x) \
	pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
		 x->offset_before, x->offset_after, x->correction_pos);

static DEFINE_SPINLOCK(nf_nat_seqofs_lock);

/* Setup TCP sequence correction given this change at this sequence */
static inline void
adjust_tcp_sequence(u32 seq,
		    int sizediff,
		    struct nf_conn *ct,
		    enum ip_conntrack_info ctinfo)
{
	int dir;
	struct nf_nat_seq *this_way, *other_way;
	struct nf_conn_nat *nat = nfct_nat(ct);

	pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
		 seq, sizediff);

	dir = CTINFO2DIR(ctinfo);

	this_way = &nat->seq[dir];
	other_way = &nat->seq[!dir];

	pr_debug("nf_nat_resize_packet: Seq_offset before: ");
	DUMP_OFFSET(this_way);

	spin_lock_bh(&nf_nat_seqofs_lock);

	/* SYN adjust. If it's uninitialized, or this is after last
	 * correction, record it: we don't handle more than one
	 * adjustment in the window, but do deal with common case of a
	 * retransmit */
	if (this_way->offset_before == this_way->offset_after ||
	    before(this_way->correction_pos, seq)) {
		this_way->correction_pos = seq;
		this_way->offset_before = this_way->offset_after;
		this_way->offset_after += sizediff;
	}
	spin_unlock_bh(&nf_nat_seqofs_lock);

	pr_debug("nf_nat_resize_packet: Seq_offset after: ");
	DUMP_OFFSET(this_way);
}

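/*
 * Example with hypothetical numbers: if a helper grows the payload of a
 * segment whose first byte has sequence number 1000 by 4 bytes, the call
 * adjust_tcp_sequence(1000, 4, ct, ctinfo) records correction_pos = 1000,
 * keeps the previous cumulative offset in offset_before and bumps
 * offset_after by 4.  nf_nat_seq_adjust() below then shifts segments with
 * seq > 1000 by offset_after, while retransmits at or before 1000 keep
 * using offset_before.
 */
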
/* Frobs data inside this packet, which is linear. */
static void mangle_contents(struct sk_buff *skb,
			    unsigned int dataoff,
			    unsigned int match_offset,
			    unsigned int match_len,
			    const char *rep_buffer,
			    unsigned int rep_len)
{
	unsigned char *data;

	BUG_ON(skb_is_nonlinear(skb));
	data = skb_network_header(skb) + dataoff;

	/* move post-replacement */
	memmove(data + match_offset + rep_len,
		data + match_offset + match_len,
		skb->tail - (skb->network_header + dataoff +
			     match_offset + match_len));

	/* insert data from buffer */
	memcpy(data + match_offset, rep_buffer, rep_len);

	/* update skb info */
	if (rep_len > match_len) {
		pr_debug("nf_nat_mangle_packet: Extending packet by "
			 "%u from %u bytes\n", rep_len - match_len, skb->len);
		skb_put(skb, rep_len - match_len);
	} else {
		pr_debug("nf_nat_mangle_packet: Shrinking packet by "
			 "%u from %u bytes\n", match_len - rep_len, skb->len);
		__skb_trim(skb, skb->len + rep_len - match_len);
	}

	/* fix IP hdr checksum information */
	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));
}

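/*
 * Example with hypothetical offsets: replacing the 8-byte match "10,0,0,1"
 * at match_offset 5 with the 11-byte "192,168,0,1" first memmove()s the
 * data behind the match 3 bytes towards the tail, then memcpy()s the
 * replacement into place and finally grows the skb by 3 bytes.
 */
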
/* Unusual, but possible case. */
static int enlarge_skb(struct sk_buff **pskb, unsigned int extra)
{
	struct sk_buff *nskb;

	if ((*pskb)->len + extra > 65535)
		return 0;

	nskb = skb_copy_expand(*pskb, skb_headroom(*pskb), extra, GFP_ATOMIC);
	if (!nskb)
		return 0;

	/* Transfer socket to new skb. */
	if ((*pskb)->sk)
		skb_set_owner_w(nskb, (*pskb)->sk);
	kfree_skb(*pskb);
	*pskb = nskb;
	return 1;
}

/* Generic function for mangling variable-length address changes inside
 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
 * command in FTP).
 *
 * Takes care of all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 */
int
nf_nat_mangle_tcp_packet(struct sk_buff **pskb,
			 struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned int match_offset,
			 unsigned int match_len,
			 const char *rep_buffer,
			 unsigned int rep_len)
{
	struct rtable *rt = (struct rtable *)(*pskb)->dst;
	struct iphdr *iph;
	struct tcphdr *tcph;
	int oldlen, datalen;

	if (!skb_make_writable(pskb, (*pskb)->len))
		return 0;

	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(*pskb) &&
	    !enlarge_skb(pskb, rep_len - match_len))
		return 0;

	SKB_LINEAR_ASSERT(*pskb);

	iph = ip_hdr(*pskb);
	tcph = (void *)iph + iph->ihl*4;

	oldlen = (*pskb)->len - iph->ihl*4;
	mangle_contents(*pskb, iph->ihl*4 + tcph->doff*4,
			match_offset, match_len, rep_buffer, rep_len);

	datalen = (*pskb)->len - iph->ihl*4;
	if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    (*pskb)->dev->features & NETIF_F_V4_CSUM) {
			(*pskb)->ip_summed = CHECKSUM_PARTIAL;
			(*pskb)->csum_start = skb_headroom(*pskb) +
					      skb_network_offset(*pskb) +
					      iph->ihl * 4;
			(*pskb)->csum_offset = offsetof(struct tcphdr, check);
			tcph->check = ~tcp_v4_check(datalen,
						    iph->saddr, iph->daddr, 0);
		} else {
			tcph->check = 0;
			tcph->check = tcp_v4_check(datalen,
						   iph->saddr, iph->daddr,
						   csum_partial(tcph,
								datalen, 0));
		}
	} else
		nf_proto_csum_replace2(&tcph->check, *pskb,
				       htons(oldlen), htons(datalen), 1);

	if (rep_len != match_len) {
		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
		adjust_tcp_sequence(ntohl(tcph->seq),
				    (int)rep_len - (int)match_len,
				    ct, ctinfo);
		/* Tell TCP window tracking about seq change */
		nf_conntrack_tcp_update(*pskb, ip_hdrlen(*pskb),
					ct, CTINFO2DIR(ctinfo));
	}
	return 1;
}
EXPORT_SYMBOL(nf_nat_mangle_tcp_packet);
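
/*
 * Example: a minimal sketch of how a TCP NAT helper (an FTP-style helper
 * with illustrative variable names is assumed) calls this from its help()
 * callback to rewrite an address/port string found at matchoff/matchlen:
 *
 *	char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")];
 *
 *	sprintf(buffer, "%u,%u,%u,%u,%u,%u",
 *		((unsigned char *)&newip)[0], ((unsigned char *)&newip)[1],
 *		((unsigned char *)&newip)[2], ((unsigned char *)&newip)[3],
 *		port >> 8, port & 0xff);
 *	if (!nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, matchlen,
 *				      buffer, strlen(buffer)))
 *		return NF_DROP;
 *
 * When strlen(buffer) != matchlen, the sequence number bookkeeping is done
 * above via adjust_tcp_sequence() and IPS_SEQ_ADJUST_BIT.
 */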

/* Generic function for mangling variable-length address changes inside
 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
 * command in the Amanda protocol)
 *
 * Takes care of all the nasty checksum changes, skb enlargement, ...
 *
 * XXX - This function could be merged with nf_nat_mangle_tcp_packet which
 * should be fairly easy to do.
 */
int
nf_nat_mangle_udp_packet(struct sk_buff **pskb,
			 struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned int match_offset,
			 unsigned int match_len,
			 const char *rep_buffer,
			 unsigned int rep_len)
{
	struct rtable *rt = (struct rtable *)(*pskb)->dst;
	struct iphdr *iph;
	struct udphdr *udph;
	int datalen, oldlen;

	/* UDP helpers might accidentally mangle the wrong packet */
	iph = ip_hdr(*pskb);
	if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) +
			       match_offset + match_len)
		return 0;

	if (!skb_make_writable(pskb, (*pskb)->len))
		return 0;

	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(*pskb) &&
	    !enlarge_skb(pskb, rep_len - match_len))
		return 0;

	iph = ip_hdr(*pskb);
	udph = (void *)iph + iph->ihl*4;

	oldlen = (*pskb)->len - iph->ihl*4;
	mangle_contents(*pskb, iph->ihl*4 + sizeof(*udph),
			match_offset, match_len, rep_buffer, rep_len);

	/* update the length of the UDP packet */
	datalen = (*pskb)->len - iph->ihl*4;
	udph->len = htons(datalen);

	/* fix udp checksum if udp checksum was previously calculated */
	if (!udph->check && (*pskb)->ip_summed != CHECKSUM_PARTIAL)
		return 1;

	if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    (*pskb)->dev->features & NETIF_F_V4_CSUM) {
			(*pskb)->ip_summed = CHECKSUM_PARTIAL;
			(*pskb)->csum_start = skb_headroom(*pskb) +
					      skb_network_offset(*pskb) +
					      iph->ihl * 4;
			(*pskb)->csum_offset = offsetof(struct udphdr, check);
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 datalen, IPPROTO_UDP,
							 0);
		} else {
			udph->check = 0;
			udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
							datalen, IPPROTO_UDP,
							csum_partial(udph,
								     datalen, 0));
			if (!udph->check)
				udph->check = CSUM_MANGLED_0;
		}
	} else
		nf_proto_csum_replace2(&udph->check, *pskb,
				       htons(oldlen), htons(datalen), 1);

	return 1;
}
EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
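
/*
 * Example: a minimal sketch of an Amanda-style UDP helper call (variable
 * names are illustrative); the replacement string carries the translated
 * port number in decimal:
 *
 *	char buffer[sizeof("65535")];
 *
 *	sprintf(buffer, "%u", ntohs(newport));
 *	if (!nf_nat_mangle_udp_packet(pskb, exp->master, ctinfo,
 *				      matchoff, matchlen,
 *				      buffer, strlen(buffer)))
 *		return NF_DROP;
 */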

/* Adjust one found SACK option including checksum correction */
static void
sack_adjust(struct sk_buff *skb,
	    struct tcphdr *tcph,
	    unsigned int sackoff,
	    unsigned int sackend,
	    struct nf_nat_seq *natseq)
{
	while (sackoff < sackend) {
		struct tcp_sack_block_wire *sack;
		__be32 new_start_seq, new_end_seq;

		sack = (void *)skb->data + sackoff;
		if (after(ntohl(sack->start_seq) - natseq->offset_before,
			  natseq->correction_pos))
			new_start_seq = htonl(ntohl(sack->start_seq)
					      - natseq->offset_after);
		else
			new_start_seq = htonl(ntohl(sack->start_seq)
					      - natseq->offset_before);

		if (after(ntohl(sack->end_seq) - natseq->offset_before,
			  natseq->correction_pos))
			new_end_seq = htonl(ntohl(sack->end_seq)
					    - natseq->offset_after);
		else
			new_end_seq = htonl(ntohl(sack->end_seq)
					    - natseq->offset_before);

		pr_debug("sack_adjust: start_seq: %u->%u, end_seq: %u->%u\n",
			 ntohl(sack->start_seq), ntohl(new_start_seq),
			 ntohl(sack->end_seq), ntohl(new_end_seq));

		nf_proto_csum_replace4(&tcph->check, skb,
				       sack->start_seq, new_start_seq, 0);
		nf_proto_csum_replace4(&tcph->check, skb,
				       sack->end_seq, new_end_seq, 0);
		sack->start_seq = new_start_seq;
		sack->end_seq = new_end_seq;
		sackoff += sizeof(*sack);
	}
}

/* TCP SACK sequence number adjustment */
static inline unsigned int
nf_nat_sack_adjust(struct sk_buff **pskb,
		   struct tcphdr *tcph,
		   struct nf_conn *ct,
		   enum ip_conntrack_info ctinfo)
{
	unsigned int dir, optoff, optend;
	struct nf_conn_nat *nat = nfct_nat(ct);

	optoff = ip_hdrlen(*pskb) + sizeof(struct tcphdr);
	optend = ip_hdrlen(*pskb) + tcph->doff * 4;

	if (!skb_make_writable(pskb, optend))
		return 0;

	dir = CTINFO2DIR(ctinfo);

	while (optoff < optend) {
		/* Usually: option, length. */
		unsigned char *op = (*pskb)->data + optoff;

		switch (op[0]) {
		case TCPOPT_EOL:
			return 1;
		case TCPOPT_NOP:
			optoff++;
			continue;
		default:
			/* no partial options */
			if (optoff + 1 == optend ||
			    optoff + op[1] > optend ||
			    op[1] < 2)
				return 0;
			if (op[0] == TCPOPT_SACK &&
			    op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
			    ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
				sack_adjust(*pskb, tcph, optoff+2,
					    optoff+op[1], &nat->seq[!dir]);
			optoff += op[1];
		}
	}
	return 1;
}

/* TCP sequence number adjustment. Returns 1 on success, 0 on failure */
int
nf_nat_seq_adjust(struct sk_buff **pskb,
		  struct nf_conn *ct,
		  enum ip_conntrack_info ctinfo)
{
	struct tcphdr *tcph;
	int dir;
	__be32 newseq, newack;
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way, *other_way;

	dir = CTINFO2DIR(ctinfo);

	this_way = &nat->seq[dir];
	other_way = &nat->seq[!dir];

	if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph)))
		return 0;

	tcph = (void *)(*pskb)->data + ip_hdrlen(*pskb);
	if (after(ntohl(tcph->seq), this_way->correction_pos))
		newseq = htonl(ntohl(tcph->seq) + this_way->offset_after);
	else
		newseq = htonl(ntohl(tcph->seq) + this_way->offset_before);

	if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
		  other_way->correction_pos))
		newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_after);
	else
		newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before);

	nf_proto_csum_replace4(&tcph->check, *pskb, tcph->seq, newseq, 0);
	nf_proto_csum_replace4(&tcph->check, *pskb, tcph->ack_seq, newack, 0);

	pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
		 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
		 ntohl(newack));

	tcph->seq = newseq;
	tcph->ack_seq = newack;

	if (!nf_nat_sack_adjust(pskb, tcph, ct, ctinfo))
		return 0;

	nf_conntrack_tcp_update(*pskb, ip_hdrlen(*pskb), ct, dir);

	return 1;
}
EXPORT_SYMBOL(nf_nat_seq_adjust);
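
/*
 * Example: a minimal sketch of how a NAT hook (variable names are
 * illustrative) invokes the adjustment once a helper has resized the
 * payload of a connection:
 *
 *	ct = nf_ct_get(*pskb, &ctinfo);
 *	if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
 *		if (!nf_nat_seq_adjust(pskb, ct, ctinfo))
 *			return NF_DROP;
 *	}
 */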

/* Setup NAT on this expected conntrack so it follows master. */
/* If we fail to get a free NAT slot, we'll get dropped on confirm */
void nf_nat_follow_master(struct nf_conn *ct,
			  struct nf_conntrack_expect *exp)
{
	struct nf_nat_range range;

	/* This must be a fresh one. */
	BUG_ON(ct->status & IPS_NAT_DONE_MASK);

	/* Change src to where master sends to */
	range.flags = IP_NAT_RANGE_MAP_IPS;
	range.min_ip = range.max_ip
		= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
	/* hook doesn't matter, but it has to do source manip */
	nf_nat_setup_info(ct, &range, NF_IP_POST_ROUTING);

	/* For DST manip, map port here to where it's expected. */
	range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
	range.min = range.max = exp->saved_proto;
	range.min_ip = range.max_ip
		= ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
	/* hook doesn't matter, but it has to do destination manip */
	nf_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING);
}
EXPORT_SYMBOL(nf_nat_follow_master);
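
/*
 * Example: a helper's NAT counterpart (an FTP-style helper with
 * illustrative variable names is assumed) normally wires this up as the
 * expectation callback before registering the expectation:
 *
 *	exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
 *	exp->dir = !dir;
 *	exp->expectfn = nf_nat_follow_master;
 */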