/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>

static DEFINE_SPINLOCK(nf_nat_lock);

static DEFINE_MUTEX(nf_nat_proto_mutex);
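/* Per-family registries of NAT protocol handlers, indexed by NFPROTO_*
 * (and, at the second level, by L4 protocol number). Writers update them
 * under nf_nat_proto_mutex; packet-path readers use RCU.
 */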
static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
                                                __read_mostly;
static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
                                                __read_mostly;


inline const struct nf_nat_l3proto *
__nf_nat_l3proto_find(u8 family)
{
        return rcu_dereference(nf_nat_l3protos[family]);
}

inline const struct nf_nat_l4proto *
__nf_nat_l4proto_find(u8 family, u8 protonum)
{
        return rcu_dereference(nf_nat_l4protos[family][protonum]);
}
EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find);

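/* CONFIG_XFRM glue: once a connection is NATed, the flow key used for
 * IPsec policy lookups must reflect the translated addresses, so
 * __nf_nat_decode_session() delegates to the l3proto to rewrite the flowi
 * from the conntrack tuple selected by the NAT status bit.
 */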
#ifdef CONFIG_XFRM
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
        const struct nf_nat_l3proto *l3proto;
        const struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        enum ip_conntrack_dir dir;
        unsigned long statusbit;
        u8 family;

        ct = nf_ct_get(skb, &ctinfo);
        if (ct == NULL)
                return;

        family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
        rcu_read_lock();
        l3proto = __nf_nat_l3proto_find(family);
        if (l3proto == NULL)
                goto out;

        dir = CTINFO2DIR(ctinfo);
        if (dir == IP_CT_DIR_ORIGINAL)
                statusbit = IPS_DST_NAT;
        else
                statusbit = IPS_SRC_NAT;

        l3proto->decode_session(skb, ct, dir, statusbit, fl);
out:
        rcu_read_unlock();
}

int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family)
{
        struct flowi fl;
        unsigned int hh_len;
        struct dst_entry *dst;

        if (xfrm_decode_session(skb, &fl, family) < 0)
                return -1;

        dst = skb_dst(skb);
        if (dst->xfrm)
                dst = ((struct xfrm_dst *)dst)->route;
        dst_hold(dst);

        dst = xfrm_lookup(dev_net(dst->dev), dst, &fl, skb->sk, 0);
        if (IS_ERR(dst))
                return -1;

        skb_dst_drop(skb);
        skb_dst_set(skb, dst);

        /* Change in oif may mean change in hh_len. */
        hh_len = skb_dst(skb)->dev->hard_header_len;
        if (skb_headroom(skb) < hh_len &&
            pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
                return -1;
        return 0;
}
EXPORT_SYMBOL(nf_xfrm_me_harder);
#endif /* CONFIG_XFRM */

/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct net *net, u16 zone,
            const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash;

        /* Original src, to ensure we map it consistently if poss. */
        hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
                      tuple->dst.protonum ^ zone ^ nf_conntrack_hash_rnd);
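
        /* Multiply-shift maps the 32-bit hash uniformly onto
         * [0, nat_htable_size) without the cost of a modulo.
         */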
        return ((u64)hash * net->ct.nat_htable_size) >> 32;
}

/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
                  const struct nf_conn *ignored_conntrack)
{
        /* Conntrack doesn't keep track of outgoing tuples; only
         * incoming ones. NAT means they don't have a fixed mapping,
         * so we invert the tuple and look for the incoming reply.
         *
         * We could keep a separate hash if this proves too slow.
         */
        struct nf_conntrack_tuple reply;

        nf_ct_invert_tuplepr(&reply, tuple);
        return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);

/* If we source-map this tuple so the reply looks like reply_tuple, will
 * that meet the constraints of range?
 */
static int in_range(const struct nf_nat_l3proto *l3proto,
                    const struct nf_nat_l4proto *l4proto,
                    const struct nf_conntrack_tuple *tuple,
                    const struct nf_nat_range *range)
{
        /* If we are supposed to map IPs, then we must be in the
         * range specified, otherwise let this drag us onto a new src IP.
         */
        if (range->flags & NF_NAT_RANGE_MAP_IPS &&
            !l3proto->in_range(tuple, range))
                return 0;

        if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
            l4proto->in_range(tuple, NF_NAT_MANIP_SRC,
                              &range->min_proto, &range->max_proto))
                return 1;

        return 0;
}

static inline int
same_src(const struct nf_conn *ct,
         const struct nf_conntrack_tuple *tuple)
{
        const struct nf_conntrack_tuple *t;

        t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
        return (t->dst.protonum == tuple->dst.protonum &&
                nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
                t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net, u16 zone,
                     const struct nf_nat_l3proto *l3proto,
                     const struct nf_nat_l4proto *l4proto,
                     const struct nf_conntrack_tuple *tuple,
                     struct nf_conntrack_tuple *result,
                     const struct nf_nat_range *range)
{
        unsigned int h = hash_by_src(net, zone, tuple);
        const struct nf_conn_nat *nat;
        const struct nf_conn *ct;
        const struct hlist_node *n;

        hlist_for_each_entry_rcu(nat, n, &net->ct.nat_bysource[h], bysource) {
                ct = nat->ct;
                if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
                        /* Copy source part from reply tuple. */
                        nf_ct_invert_tuplepr(result,
                                             &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
                        result->dst = tuple->dst;

                        if (in_range(l3proto, l4proto, result, range))
                                return 1;
                }
        }
        return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple. Fairness doesn't come into it. Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
                    const struct nf_nat_range *range,
                    const struct nf_conn *ct,
                    enum nf_nat_manip_type maniptype)
{
        union nf_inet_addr *var_ipp;
        unsigned int i, max;
        /* Host order */
        u32 minip, maxip, j, dist;
        bool full_range;

        /* No IP mapping? Do nothing. */
        if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
                return;

        if (maniptype == NF_NAT_MANIP_SRC)
                var_ipp = &tuple->src.u3;
        else
                var_ipp = &tuple->dst.u3;

        /* Fast path: only one choice. */
        if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
                *var_ipp = range->min_addr;
                return;
        }

        if (nf_ct_l3num(ct) == NFPROTO_IPV4)
                max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
        else
                max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

        /* Hashing source and destination IPs gives a fairly even
         * spread in practice (if there are a small number of IPs
         * involved, there usually aren't that many connections
         * anyway). The consistency means that servers see the same
         * client coming from the same IP (some Internet Banking sites
         * like this), even across reboots.
         */
        j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
                   range->flags & NF_NAT_RANGE_PERSISTENT ?
                        0 : (__force u32)tuple->dst.u3.all[max] ^ zone);

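        /* The address is built 32 bits at a time (one word for IPv4, four
         * for IPv6): each word interpolates j into [minip, maxip] with a
         * 64-bit multiply-shift. E.g. with the IPv4 range
         * 10.0.0.1 - 10.0.0.14 and j = 0x80000000: dist = 14,
         * ((u64)j * dist) >> 32 = 7, giving 10.0.0.8. Once a chosen word
         * is below the per-word maximum, all less significant words may
         * use the full 0 - 0xffffffff range.
         */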
        full_range = false;
        for (i = 0; i <= max; i++) {
                /* If first bytes of the address are at the maximum, use the
                 * distance. Otherwise use the full range.
                 */
                if (!full_range) {
                        minip = ntohl((__force __be32)range->min_addr.all[i]);
                        maxip = ntohl((__force __be32)range->max_addr.all[i]);
                        dist  = maxip - minip + 1;
                } else {
                        minip = 0;
                        dist  = ~0;
                }

                var_ipp->all[i] = (__force __u32)
                        htonl(minip + (((u64)j * dist) >> 32));
                if (var_ipp->all[i] != range->max_addr.all[i])
                        full_range = true;

                if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
                        j ^= (__force u32)tuple->dst.u3.all[i];
        }
}

/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet.
 */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
                 const struct nf_conntrack_tuple *orig_tuple,
                 const struct nf_nat_range *range,
                 struct nf_conn *ct,
                 enum nf_nat_manip_type maniptype)
{
        const struct nf_nat_l3proto *l3proto;
        const struct nf_nat_l4proto *l4proto;
        struct net *net = nf_ct_net(ct);
        u16 zone = nf_ct_zone(ct);

        rcu_read_lock();
        l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
        l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num,
                                        orig_tuple->dst.protonum);

        /* 1) If this srcip/proto/src-proto-part is currently mapped,
         * and that same mapping gives a unique tuple within the given
         * range, use that.
         *
         * This is only required for source (ie. NAT/masq) mappings.
         * So far, we don't do local source mappings, so multiple
         * manips are not an issue.
         */
        if (maniptype == NF_NAT_MANIP_SRC &&
            !(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
                /* try the original tuple first */
                if (in_range(l3proto, l4proto, orig_tuple, range)) {
                        if (!nf_nat_used_tuple(orig_tuple, ct)) {
                                *tuple = *orig_tuple;
                                goto out;
                        }
                } else if (find_appropriate_src(net, zone, l3proto, l4proto,
                                                orig_tuple, tuple, range)) {
                        pr_debug("get_unique_tuple: Found current src map\n");
                        if (!nf_nat_used_tuple(tuple, ct))
                                goto out;
                }
        }

        /* 2) Select the least-used IP/proto combination in the given range */
        *tuple = *orig_tuple;
        find_best_ips_proto(zone, tuple, range, ct, maniptype);

        /* 3) The per-protocol part of the manip is made to map into
         * the range to make a unique tuple.
         */

        /* Only bother mapping if it's not already in range and unique */
        if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
                if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
                        if (l4proto->in_range(tuple, maniptype,
                                              &range->min_proto,
                                              &range->max_proto) &&
                            (range->min_proto.all == range->max_proto.all ||
                             !nf_nat_used_tuple(tuple, ct)))
                                goto out;
                } else if (!nf_nat_used_tuple(tuple, ct)) {
                        goto out;
                }
        }

        /* Last chance: get protocol to try to obtain unique tuple. */
        l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
out:
        rcu_read_unlock();
}

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
                  const struct nf_nat_range *range,
                  enum nf_nat_manip_type maniptype)
{
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_tuple curr_tuple, new_tuple;
        struct nf_conn_nat *nat;

        /* nat helper or nfctnetlink also sets up the binding */
        nat = nfct_nat(ct);
        if (!nat) {
                nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
                if (nat == NULL) {
                        pr_debug("failed to add NAT extension\n");
                        return NF_ACCEPT;
                }
        }

        NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
                     maniptype == NF_NAT_MANIP_DST);
        BUG_ON(nf_nat_initialized(ct, maniptype));

        /* What we've got will look like inverse of reply. Normally
         * this is what is in the conntrack, except for prior
         * manipulations (future optimization: if num_manips == 0,
         * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
         */
        nf_ct_invert_tuplepr(&curr_tuple,
                             &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

        if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
                struct nf_conntrack_tuple reply;

                /* Alter conntrack table so it will recognize replies. */
                nf_ct_invert_tuplepr(&reply, &new_tuple);
                nf_conntrack_alter_reply(ct, &reply);

                /* Non-atomic: we own this at the moment. */
                if (maniptype == NF_NAT_MANIP_SRC)
                        ct->status |= IPS_SRC_NAT;
                else
                        ct->status |= IPS_DST_NAT;
        }

        if (maniptype == NF_NAT_MANIP_SRC) {
                unsigned int srchash;

                srchash = hash_by_src(net, nf_ct_zone(ct),
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                spin_lock_bh(&nf_nat_lock);
                /* nf_conntrack_alter_reply might re-allocate the extension area */
                nat = nfct_nat(ct);
                nat->ct = ct;
                hlist_add_head_rcu(&nat->bysource,
                                   &net->ct.nat_bysource[srchash]);
                spin_unlock_bh(&nf_nat_lock);
        }

        /* It's done. */
        if (maniptype == NF_NAT_MANIP_DST)
                ct->status |= IPS_DST_NAT_DONE;
        else
                ct->status |= IPS_SRC_NAT_DONE;

        return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
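
/* Illustrative sketch (hypothetical caller, not part of this file): an
 * SNAT target binding a new connection to the single address 10.0.0.1
 * might look like
 *
 *      struct nf_nat_range range = {
 *              .flags       = NF_NAT_RANGE_MAP_IPS,
 *              .min_addr.ip = htonl(0x0a000001),
 *              .max_addr.ip = htonl(0x0a000001),
 *      };
 *      return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 */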

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo,
                           unsigned int hooknum,
                           struct sk_buff *skb)
{
        const struct nf_nat_l3proto *l3proto;
        const struct nf_nat_l4proto *l4proto;
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
        unsigned long statusbit;
        enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

        if (mtype == NF_NAT_MANIP_SRC)
                statusbit = IPS_SRC_NAT;
        else
                statusbit = IPS_DST_NAT;

        /* Invert if this is reply dir. */
        if (dir == IP_CT_DIR_REPLY)
                statusbit ^= IPS_NAT_MASK;
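        /* E.g. for a source-NATed connection (IPS_SRC_NAT set):
         * original-direction packets have their source rewritten at
         * POST_ROUTING, while replies match after the XOR and get their
         * destination rewritten back at PRE_ROUTING.
         */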

        /* Non-atomic: these bits don't change. */
        if (ct->status & statusbit) {
                struct nf_conntrack_tuple target;

                /* We are aiming to look like inverse of other direction. */
                nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

                l3proto = __nf_nat_l3proto_find(target.src.l3num);
                l4proto = __nf_nat_l4proto_find(target.src.l3num,
                                                target.dst.protonum);
                if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype))
                        return NF_DROP;
        }
        return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);

struct nf_nat_proto_clean {
        u8      l3proto;
        u8      l4proto;
        bool    hash;
};

/* Clear NAT section of all conntracks, in case we're loaded again. */
static int nf_nat_proto_clean(struct nf_conn *i, void *data)
{
        const struct nf_nat_proto_clean *clean = data;
        struct nf_conn_nat *nat = nfct_nat(i);

        if (!nat)
                return 0;
        if (!(i->status & IPS_SRC_NAT_DONE))
                return 0;
        if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
            (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
                return 0;

        if (clean->hash) {
                spin_lock_bh(&nf_nat_lock);
                hlist_del_rcu(&nat->bysource);
                spin_unlock_bh(&nf_nat_lock);
        } else {
                memset(nat, 0, sizeof(*nat));
                i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK |
                               IPS_SEQ_ADJUST);
        }
        return 0;
}
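
/* Teardown runs in two passes: first unlink entries from the bysource
 * hash (clean->hash == true) and wait for RCU readers with
 * synchronize_rcu(), then scrub the NAT extension and status bits.
 */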
static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
{
        struct nf_nat_proto_clean clean = {
                .l3proto = l3proto,
                .l4proto = l4proto,
        };
        struct net *net;

        rtnl_lock();
        /* Step 1 - remove from bysource hash */
        clean.hash = true;
        for_each_net(net)
                nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
        synchronize_rcu();

        /* Step 2 - clean NAT section */
        clean.hash = false;
        for_each_net(net)
                nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
        rtnl_unlock();
}

static void nf_nat_l3proto_clean(u8 l3proto)
{
        struct nf_nat_proto_clean clean = {
                .l3proto = l3proto,
        };
        struct net *net;

        rtnl_lock();
        /* Step 1 - remove from bysource hash */
        clean.hash = true;
        for_each_net(net)
                nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
        synchronize_rcu();

        /* Step 2 - clean NAT section */
        clean.hash = false;
        for_each_net(net)
                nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
        rtnl_unlock();
}

/* Protocol registration. */
int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
        const struct nf_nat_l4proto **l4protos;
        unsigned int i;
        int ret = 0;

        mutex_lock(&nf_nat_proto_mutex);
        if (nf_nat_l4protos[l3proto] == NULL) {
                l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *),
                                   GFP_KERNEL);
                if (l4protos == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                for (i = 0; i < IPPROTO_MAX; i++)
                        RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown);

                /* Before making proto_array visible to lockless readers,
                 * we must make sure its content is committed to memory.
                 */
                smp_wmb();

                nf_nat_l4protos[l3proto] = l4protos;
        }

        if (rcu_dereference_protected(
                        nf_nat_l4protos[l3proto][l4proto->l4proto],
                        lockdep_is_held(&nf_nat_proto_mutex)
                        ) != &nf_nat_l4proto_unknown) {
                ret = -EBUSY;
                goto out;
        }
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto);
out:
        mutex_unlock(&nf_nat_proto_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_register);
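
/* Illustrative sketch (hypothetical module init, not part of this file):
 * a per-protocol NAT module such as the SCTP one would register itself
 * from its init path with
 *
 *      err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_sctp);
 *
 * and undo it via nf_nat_l4proto_unregister() on exit.
 */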

/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
        mutex_lock(&nf_nat_proto_mutex);
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto],
                         &nf_nat_l4proto_unknown);
        mutex_unlock(&nf_nat_proto_mutex);
        synchronize_rcu();

        nf_nat_l4proto_clean(l3proto, l4proto->l4proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);

int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
{
        int err;

        err = nf_ct_l3proto_try_module_get(l3proto->l3proto);
        if (err < 0)
                return err;

        mutex_lock(&nf_nat_proto_mutex);
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
                         &nf_nat_l4proto_tcp);
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP],
                         &nf_nat_l4proto_udp);
        mutex_unlock(&nf_nat_proto_mutex);

        RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
        return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);

void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
{
        mutex_lock(&nf_nat_proto_mutex);
        RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
        mutex_unlock(&nf_nat_proto_mutex);
        synchronize_rcu();

        nf_nat_l3proto_clean(l3proto->l3proto);
        nf_ct_l3proto_module_put(l3proto->l3proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
        struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);

        if (nat == NULL || nat->ct == NULL)
                return;

        NF_CT_ASSERT(nat->ct->status & IPS_SRC_NAT_DONE);

        spin_lock_bh(&nf_nat_lock);
        hlist_del_rcu(&nat->bysource);
        spin_unlock_bh(&nf_nat_lock);
}

static void nf_nat_move_storage(void *new, void *old)
{
        struct nf_conn_nat *new_nat = new;
        struct nf_conn_nat *old_nat = old;
        struct nf_conn *ct = old_nat->ct;

        if (!ct || !(ct->status & IPS_SRC_NAT_DONE))
                return;

        spin_lock_bh(&nf_nat_lock);
        hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
        spin_unlock_bh(&nf_nat_lock);
}

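/* The conntrack extension area can be reallocated (e.g. when a helper
 * extension is added later); .move keeps the bysource hash linkage valid
 * across such a move, and NF_CT_EXT_F_PREALLOC reserves room up front so
 * the move is rarely needed.
 */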
static struct nf_ct_ext_type nat_extend __read_mostly = {
        .len            = sizeof(struct nf_conn_nat),
        .align          = __alignof__(struct nf_conn_nat),
        .destroy        = nf_nat_cleanup_conntrack,
        .move           = nf_nat_move_storage,
        .id             = NF_CT_EXT_NAT,
        .flags          = NF_CT_EXT_F_PREALLOC,
};

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
        [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
        [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
};

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
                                     const struct nf_conn *ct,
                                     struct nf_nat_range *range)
{
        struct nlattr *tb[CTA_PROTONAT_MAX+1];
        const struct nf_nat_l4proto *l4proto;
        int err;

        err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy);
        if (err < 0)
                return err;

        l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto->nlattr_to_range)
                err = l4proto->nlattr_to_range(tb, range);

        return err;
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
        [CTA_NAT_V4_MINIP]      = { .type = NLA_U32 },
        [CTA_NAT_V4_MAXIP]      = { .type = NLA_U32 },
        [CTA_NAT_V6_MINIP]      = { .len = sizeof(struct in6_addr) },
        [CTA_NAT_V6_MAXIP]      = { .len = sizeof(struct in6_addr) },
        [CTA_NAT_PROTO]         = { .type = NLA_NESTED },
};

static int
nfnetlink_parse_nat(const struct nlattr *nat,
                    const struct nf_conn *ct, struct nf_nat_range *range)
{
        const struct nf_nat_l3proto *l3proto;
        struct nlattr *tb[CTA_NAT_MAX+1];
        int err;

        memset(range, 0, sizeof(*range));

        err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy);
        if (err < 0)
                return err;

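        /* -EAGAIN tells the ctnetlink caller that the matching l3 NAT
         * module is not loaded yet, so it can request the module and
         * retry the operation.
         */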
        rcu_read_lock();
        l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
        if (l3proto == NULL) {
                err = -EAGAIN;
                goto out;
        }
        err = l3proto->nlattr_to_range(tb, range);
        if (err < 0)
                goto out;

        if (!tb[CTA_NAT_PROTO])
                goto out;

        err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
out:
        rcu_read_unlock();
        return err;
}

static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
                          enum nf_nat_manip_type manip,
                          const struct nlattr *attr)
{
        struct nf_nat_range range;
        int err;

        err = nfnetlink_parse_nat(attr, ct, &range);
        if (err < 0)
                return err;
        if (nf_nat_initialized(ct, manip))
                return -EEXIST;

        return nf_nat_setup_info(ct, &range, manip);
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
                          enum nf_nat_manip_type manip,
                          const struct nlattr *attr)
{
        return -EOPNOTSUPP;
}
#endif

static int __net_init nf_nat_net_init(struct net *net)
{
        /* Leave them the same for the moment. */
        net->ct.nat_htable_size = net->ct.htable_size;
        net->ct.nat_bysource = nf_ct_alloc_hashtable(&net->ct.nat_htable_size, 0);
        if (!net->ct.nat_bysource)
                return -ENOMEM;
        return 0;
}

static void __net_exit nf_nat_net_exit(struct net *net)
{
        struct nf_nat_proto_clean clean = {};

        nf_ct_iterate_cleanup(net, &nf_nat_proto_clean, &clean);
        synchronize_rcu();
        nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
}

static struct pernet_operations nf_nat_net_ops = {
        .init = nf_nat_net_init,
        .exit = nf_nat_net_exit,
};

static struct nf_ct_helper_expectfn follow_master_nat = {
        .name           = "nat-follow-master",
        .expectfn       = nf_nat_follow_master,
};

static struct nfq_ct_nat_hook nfq_ct_nat = {
        .seq_adjust     = nf_nat_tcp_seq_adjust,
};

static int __init nf_nat_init(void)
{
        int ret;

        ret = nf_ct_extend_register(&nat_extend);
        if (ret < 0) {
                printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
                return ret;
        }

        ret = register_pernet_subsys(&nf_nat_net_ops);
        if (ret < 0)
                goto cleanup_extend;

        nf_ct_helper_expectfn_register(&follow_master_nat);

        /* Initialize fake conntrack so that NAT will skip it */
        nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);

        BUG_ON(nf_nat_seq_adjust_hook != NULL);
        RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
        BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
        RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
                         nfnetlink_parse_nat_setup);
        BUG_ON(nf_ct_nat_offset != NULL);
        RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset);
        RCU_INIT_POINTER(nfq_ct_nat_hook, &nfq_ct_nat);
#ifdef CONFIG_XFRM
        BUG_ON(nf_nat_decode_session_hook != NULL);
        RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
#endif
        return 0;

cleanup_extend:
        nf_ct_extend_unregister(&nat_extend);
        return ret;
}

static void __exit nf_nat_cleanup(void)
{
        unsigned int i;

        unregister_pernet_subsys(&nf_nat_net_ops);
        nf_ct_extend_unregister(&nat_extend);
        nf_ct_helper_expectfn_unregister(&follow_master_nat);
        RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
        RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
        RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
        RCU_INIT_POINTER(nfq_ct_nat_hook, NULL);
#ifdef CONFIG_XFRM
        RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
        for (i = 0; i < NFPROTO_NUMPROTO; i++)
                kfree(nf_nat_l4protos[i]);
        synchronize_net();
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);