/* NAT for netfilter; shared with compatibility layer. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <net/checksum.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/tcp.h>  /* For tcp_prot in getorigdst */
#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/jhash.h>

#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
static DEFINE_RWLOCK(nf_nat_lock);

static struct nf_conntrack_l3proto *l3proto = NULL;

/* Calculated at init based on memory size */
static unsigned int nf_nat_htable_size;
static int nf_nat_vmalloced;

static struct hlist_head *bysource;

#define MAX_IP_NAT_PROTO 256
static struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO];

static inline struct nf_nat_protocol *
__nf_nat_proto_find(u_int8_t protonum)
{
	return rcu_dereference(nf_nat_protos[protonum]);
}

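/* Reference counting for protocol NAT modules: __nf_nat_proto_find() is a
 * lockless (RCU) lookup, while nf_nat_proto_find_get() additionally pins
 * the owning module so it cannot unload while a caller holds the pointer,
 * falling back to nf_nat_unknown_protocol when the module is already on
 * its way out.  Callers must balance with nf_nat_proto_put(). */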
struct nf_nat_protocol *
nf_nat_proto_find_get(u_int8_t protonum)
{
	struct nf_nat_protocol *p;

	rcu_read_lock();
	p = __nf_nat_proto_find(protonum);
	if (!try_module_get(p->me))
		p = &nf_nat_unknown_protocol;
	rcu_read_unlock();

	return p;
}
EXPORT_SYMBOL_GPL(nf_nat_proto_find_get);

void
nf_nat_proto_put(struct nf_nat_protocol *p)
{
	module_put(p->me);
}
EXPORT_SYMBOL_GPL(nf_nat_proto_put);

/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct nf_conntrack_tuple *tuple)
{
	/* Original src, to ensure we map it consistently if poss. */
	return jhash_3words((__force u32)tuple->src.u3.ip,
			    (__force u32)tuple->src.u.all,
			    tuple->dst.protonum, 0) % nf_nat_htable_size;
}

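/* The bysource[] table, indexed by hash_by_src(), is what lets
 * find_appropriate_src() below reuse an existing SNAT mapping for a new
 * connection from the same src ip/port/proto, so one client keeps a
 * consistent external identity. */
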
/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack doesn't keep track of outgoing tuples; only
	   incoming ones.  NAT means they don't have a fixed mapping,
	   so we invert the tuple and look for the incoming reply.

	   We could keep a separate hash if this proves too slow. */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);

/* If we source-map this tuple so the reply looks like reply_tuple, will
 * that meet the constraints of the range? */
static int
in_range(const struct nf_conntrack_tuple *tuple,
	 const struct nf_nat_range *range)
{
	struct nf_nat_protocol *proto;
	int ret = 0;

	/* If we are supposed to map IPs, then we must be in the
	   range specified, otherwise let this drag us onto a new src IP. */
	if (range->flags & IP_NAT_RANGE_MAP_IPS) {
		if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
		    ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
			return 0;
	}

	rcu_read_lock();
	proto = __nf_nat_proto_find(tuple->dst.protonum);
	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	    proto->in_range(tuple, IP_NAT_MANIP_SRC,
			    &range->min, &range->max))
		ret = 1;
	rcu_read_unlock();

	return ret;
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		t->src.u3.ip == tuple->src.u3.ip &&
		t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	unsigned int h = hash_by_src(tuple);
	struct nf_conn_nat *nat;
	struct nf_conn *ct;
	struct hlist_node *n;

	read_lock_bh(&nf_nat_lock);
	hlist_for_each_entry(nat, n, &bysource[h], bysource) {
		ct = nat->ct;
		if (same_src(ct, tuple)) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuplepr(result,
				       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(result, range)) {
				read_unlock_bh(&nf_nat_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&nf_nat_lock);
	return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
   src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
   if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
   1-65535, we don't do pro-rata allocation based on ports; we choose
   the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
find_best_ips_proto(struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	__be32 *var_ipp;
	/* Host order */
	u_int32_t minip, maxip, j;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & IP_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == IP_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3.ip;
	else
		var_ipp = &tuple->dst.u3.ip;

	/* Fast path: only one choice. */
	if (range->min_ip == range->max_ip) {
		*var_ipp = range->min_ip;
		return;
	}

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots. */
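	/* Worked example (illustrative numbers only): with min_ip
	 * 10.0.0.1 and max_ip 10.0.0.4 there are 4 candidate addresses,
	 * and a given src/dst pair always yields the same j, hence the
	 * same minip + j % 4 offset and the same chosen address. */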
	minip = ntohl(range->min_ip);
	maxip = ntohl(range->max_ip);
	j = jhash_2words((__force u32)tuple->src.u3.ip,
			 (__force u32)tuple->dst.u3.ip, 0);
	*var_ipp = htonl(minip + j % (maxip - minip + 1));
}

/* Manipulate the tuple into the range given.  For NF_IP_POST_ROUTING,
 * we change the source to map into the range.  For NF_IP_PRE_ROUTING
 * and NF_IP_LOCAL_OUT, we change the destination to map into the
 * range.  It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	struct nf_nat_protocol *proto;

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	   and that same mapping gives a unique tuple within the given
	   range, use that.

	   This is only required for source (ie. NAT/masq) mappings.
	   So far, we don't do local source mappings, so multiple
	   manips are not an issue. */
	if (maniptype == IP_NAT_MANIP_SRC) {
		if (find_appropriate_src(orig_tuple, tuple, range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
				if (!nf_nat_used_tuple(tuple, ct))
					return;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given
	   range. */
	*tuple = *orig_tuple;
	find_best_ips_proto(tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	   the range to make a unique tuple. */

	rcu_read_lock();
	proto = __nf_nat_proto_find(orig_tuple->dst.protonum);

	/* Change protocol info to have some randomization */
	if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
		proto->unique_tuple(tuple, range, maniptype, ct);
		goto out;
	}

	/* Only bother mapping if it's not already in range and unique */
	if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	     proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
	    !nf_nat_used_tuple(tuple, ct))
		goto out;
	/* Last chance: get protocol to try to obtain unique tuple. */
	proto->unique_tuple(tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}

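/* Illustrative sketch (not part of this file): a SNAT/masquerade-style
 * target would typically build a range and hand it to nf_nat_setup_info()
 * from its target function, roughly:
 *
 *	struct nf_nat_range range = {
 *		.flags  = IP_NAT_RANGE_MAP_IPS,
 *		.min_ip = newsrc,
 *		.max_ip = newsrc,
 *	};
 *	return nf_nat_setup_info(ct, &range, hooknum);
 *
 * "newsrc" is a placeholder for whatever address such a target selects;
 * the field names match struct nf_nat_range as used throughout this file.
 */
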
unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  unsigned int hooknum)
{
	struct nf_conntrack_tuple curr_tuple, new_tuple;
	struct nf_conn_nat *nat;
	int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);
	enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);

	/* nat helper or nfctnetlink also sets up the binding */
	nat = nfct_nat(ct);
	if (!nat) {
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
		if (nat == NULL) {
			pr_debug("failed to add NAT extension\n");
			return NF_ACCEPT;
		}
	}

	NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING ||
		     hooknum == NF_IP_POST_ROUTING ||
		     hooknum == NF_IP_LOCAL_IN ||
		     hooknum == NF_IP_LOCAL_OUT);
	BUG_ON(nf_nat_initialized(ct, maniptype));

	/* What we've got will look like inverse of reply.  Normally
	   this is what is in the conntrack, except for prior
	   manipulations (future optimization: if num_manips == 0,
	   orig_tp =
	   conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so it will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == IP_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;
	}

	/* Place in source hash if this is the first time. */
	if (have_to_hash) {
		unsigned int srchash;

		srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		write_lock_bh(&nf_nat_lock);
		/* nf_conntrack_alter_reply might re-allocate extension area */
		nat = nfct_nat(ct);
		nat->ct = ct;
		hlist_add_head(&nat->bysource, &bysource[srchash]);
		write_unlock_bh(&nf_nat_lock);
	}

	/* It's done. */
	if (maniptype == IP_NAT_MANIP_DST)
		set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
	else
		set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);

/* Returns true if succeeded. */
static int
manip_pkt(u_int16_t proto,
	  struct sk_buff **pskb,
	  unsigned int iphdroff,
	  const struct nf_conntrack_tuple *target,
	  enum nf_nat_manip_type maniptype)
{
	struct iphdr *iph;
	struct nf_nat_protocol *p;

	if (!skb_make_writable(pskb, iphdroff + sizeof(*iph)))
		return 0;

	iph = (void *)(*pskb)->data + iphdroff;

	/* Manipulate protocol part. */

	/* rcu_read_lock()ed by nf_hook_slow */
	p = __nf_nat_proto_find(proto);
	if (!p->manip_pkt(pskb, iphdroff, target, maniptype))
		return 0;

	iph = (void *)(*pskb)->data + iphdroff;

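	/* The protocol manip above may have reallocated skb data, hence
	 * the reload of iph.  nf_csum_replace4() below then fixes the IP
	 * header checksum incrementally (RFC 1624 style) for the 4-byte
	 * address rewrite instead of recomputing the whole sum. */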
	if (maniptype == IP_NAT_MANIP_SRC) {
		nf_csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
		iph->saddr = target->src.u3.ip;
	} else {
		nf_csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
		iph->daddr = target->dst.u3.ip;
	}
	return 1;
}

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff **pskb)
{
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

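	/* IPS_NAT_MASK is IPS_SRC_NAT | IPS_DST_NAT, so XOR-ing with it
	 * flips the bit we test: a reply-direction packet needs the
	 * opposite manip to the one recorded for the original direction. */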
	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		if (!manip_pkt(target.dst.protonum, pskb, 0, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);

/* Dir is direction ICMP is coming from (opposite to packet it contains) */
int nf_nat_icmp_reply_translation(struct nf_conn *ct,
				  enum ip_conntrack_info ctinfo,
				  unsigned int hooknum,
				  struct sk_buff **pskb)
{
	struct {
		struct icmphdr icmp;
		struct iphdr ip;
	} *inside;
	struct nf_conntrack_l4proto *l4proto;
	struct nf_conntrack_tuple inner, target;
	int hdrlen = ip_hdrlen(*pskb);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);

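	/* An ICMP error embeds the start of the packet that triggered it.
	 * Both layers must be translated: first the embedded ("inner")
	 * header, so the endpoint recognises its own flow, then the outer
	 * header like any other packet in this direction. */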
	if (!skb_make_writable(pskb, hdrlen + sizeof(*inside)))
		return 0;

	inside = (void *)(*pskb)->data + ip_hdrlen(*pskb);

	/* We're actually going to mangle it beyond trivial checksum
	   adjustment, so make sure the current checksum is correct. */
	if (nf_ip_checksum(*pskb, hooknum, hdrlen, 0))
		return 0;

	/* Must be RELATED */
	NF_CT_ASSERT((*pskb)->nfctinfo == IP_CT_RELATED ||
		     (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);

	/* Redirects on non-null nats must be dropped, else they'll
	   start talking to each other without our translation, and be
	   confused... --RR */
	if (inside->icmp.type == ICMP_REDIRECT) {
		/* If NAT isn't finished, assume it and drop. */
		if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
			return 0;

		if (ct->status & IPS_NAT_MASK)
			return 0;
	}

	pr_debug("icmp_reply_translation: translating error %p manip %u "
		 "dir %s\n", *pskb, manip,
		 dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");

	/* rcu_read_lock()ed by nf_hook_slow */
	l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);

	if (!nf_ct_get_tuple(*pskb,
			     ip_hdrlen(*pskb) + sizeof(struct icmphdr),
			     (ip_hdrlen(*pskb) +
			      sizeof(struct icmphdr) + inside->ip.ihl * 4),
			     (u_int16_t)AF_INET,
			     inside->ip.protocol,
			     &inner, l3proto, l4proto))
		return 0;

	/* Change inner back to look like incoming packet.  We do the
	   opposite manip on this hook to normal, because it might not
	   pass all hooks (locally-generated ICMP).  Consider incoming
	   packet: PREROUTING (DST manip), routing produces ICMP, goes
	   through POSTROUTING (which must correct the DST manip). */
	if (!manip_pkt(inside->ip.protocol, pskb,
		       ip_hdrlen(*pskb) + sizeof(inside->icmp),
		       &ct->tuplehash[!dir].tuple,
		       !manip))
		return 0;

	if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
		/* Reload "inside": manip_pkt may have reallocated skb data. */
		inside = (void *)(*pskb)->data + ip_hdrlen(*pskb);
		inside->icmp.checksum = 0;
		inside->icmp.checksum =
			csum_fold(skb_checksum(*pskb, hdrlen,
					       (*pskb)->len - hdrlen, 0));
	}

	/* Change outer to look like the reply to an incoming packet
	 * (proto 0 means don't invert per-proto part). */
	if (manip == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
		if (!manip_pkt(0, pskb, 0, &target, manip))
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);

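/* Protocol NAT modules beyond the builtin TCP/UDP/ICMP handlers wired up
 * in nf_nat_init() below (e.g. GRE for PPTP) hook themselves into
 * nf_nat_protos[] here from their module_init(). */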
/* Protocol registration. */
int nf_nat_protocol_register(struct nf_nat_protocol *proto)
{
	int ret = 0;

	write_lock_bh(&nf_nat_lock);
	if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
		ret = -EBUSY;
		goto out;
	}
	rcu_assign_pointer(nf_nat_protos[proto->protonum], proto);
out:
	write_unlock_bh(&nf_nat_lock);
	return ret;
}
EXPORT_SYMBOL(nf_nat_protocol_register);

/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_protocol_unregister(struct nf_nat_protocol *proto)
{
	write_lock_bh(&nf_nat_lock);
	rcu_assign_pointer(nf_nat_protos[proto->protonum],
			   &nf_nat_unknown_protocol);
	write_unlock_bh(&nf_nat_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
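/* NLA_PUT() jumps to the nla_put_failure label when the skb runs out of
 * tailroom, which is why the label below is otherwise unreferenced. */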
int
nf_nat_port_range_to_nlattr(struct sk_buff *skb,
			    const struct nf_nat_range *range)
{
	NLA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16),
		&range->min.tcp.port);
	NLA_PUT(skb, CTA_PROTONAT_PORT_MAX, sizeof(__be16),
		&range->max.tcp.port);

	return 0;

nla_put_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_nat_port_range_to_nlattr);

int
nf_nat_port_nlattr_to_range(struct nlattr *tb[], struct nf_nat_range *range)
{
	int ret = 0;

	/* we have to return whether we actually parsed something or not */

	if (tb[CTA_PROTONAT_PORT_MIN]) {
		ret = 1;
		range->min.tcp.port =
			*(__be16 *)nla_data(tb[CTA_PROTONAT_PORT_MIN]);
	}

	if (!tb[CTA_PROTONAT_PORT_MAX]) {
		if (ret)
			range->max.tcp.port = range->min.tcp.port;
	} else {
		ret = 1;
		range->max.tcp.port =
			*(__be16 *)nla_data(tb[CTA_PROTONAT_PORT_MAX]);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_port_nlattr_to_range);
#endif

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);

	if (nat == NULL || nat->ct == NULL)
		return;

	NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK);

	write_lock_bh(&nf_nat_lock);
	hlist_del(&nat->bysource);
	nat->ct = NULL;
	write_unlock_bh(&nf_nat_lock);
}

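/* Called by the extension infrastructure when a conntrack's extension
 * storage is reallocated (see nf_ct_ext_add): the bysource hash still
 * points at the old nf_conn_nat, so splice the new copy in its place. */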
static void nf_nat_move_storage(struct nf_conn *conntrack, void *old)
{
	struct nf_conn_nat *new_nat = nf_ct_ext_find(conntrack, NF_CT_EXT_NAT);
	struct nf_conn_nat *old_nat = (struct nf_conn_nat *)old;
	struct nf_conn *ct = old_nat->ct;
	unsigned int srchash;

	if (!(ct->status & IPS_NAT_DONE_MASK))
		return;

	srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);

	write_lock_bh(&nf_nat_lock);
	hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
	new_nat->ct = ct;
	write_unlock_bh(&nf_nat_lock);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.move		= nf_nat_move_storage,
	.id		= NF_CT_EXT_NAT,
	.flags		= NF_CT_EXT_F_PREALLOC,
};

static int __init nf_nat_init(void)
{
	size_t i;
	int ret;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
		return ret;
	}

	/* Leave them the same for the moment. */
	nf_nat_htable_size = nf_conntrack_htable_size;

	bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
					 &nf_nat_vmalloced);
	if (!bysource) {
		ret = -ENOMEM;
		goto cleanup_extend;
	}

	/* Sew in builtin protocols. */
	write_lock_bh(&nf_nat_lock);
	for (i = 0; i < MAX_IP_NAT_PROTO; i++)
		rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
	write_unlock_bh(&nf_nat_lock);

	for (i = 0; i < nf_nat_htable_size; i++)
		INIT_HLIST_HEAD(&bysource[i]);

	/* Initialize fake conntrack so that NAT will skip it */
	nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;

	l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
	return 0;

cleanup_extend:
	nf_ct_extend_unregister(&nat_extend);
	return ret;
}

/* Clear NAT section of all conntracks, in case we're loaded again. */
static int clean_nat(struct nf_conn *i, void *data)
{
	struct nf_conn_nat *nat = nfct_nat(i);

	if (!nat)
		return 0;
	/* sizeof(*nat), not sizeof(nat): clear the whole extension,
	 * not just pointer-size bytes of it. */
	memset(nat, 0, sizeof(*nat));
	i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
	return 0;
}

static void __exit nf_nat_cleanup(void)
{
	nf_ct_iterate_cleanup(&clean_nat, NULL);
	synchronize_rcu();
	nf_ct_free_hashtable(bysource, nf_nat_vmalloced, nf_nat_htable_size);
	nf_ct_l3proto_put(l3proto);
	nf_ct_extend_unregister(&nat_extend);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);