Harald Welte | 2e4e6a1 | 2006-01-12 13:30:04 -0800 | [diff] [blame] | 1 | #ifndef _X_TABLES_H |
| 2 | #define _X_TABLES_H |
| 3 | |
Arnd Bergmann | 60c195c | 2009-02-26 00:51:43 +0100 | [diff] [blame] | 4 | #include <linux/types.h> |
| 5 | |
/* Maximum lengths (buffer sizes, including NUL) for extension function
 * names and table names as exchanged with userspace. */
#define XT_FUNCTION_MAXNAMELEN 30
#define XT_TABLE_MAXNAMELEN 32
| 8 | |
/* Header preceding every match's payload inside a rule blob.  The union is
 * viewed two ways: userspace fills in name + revision, the kernel replaces
 * them with the resolved struct xt_match pointer.  match_size in both views
 * shares the same leading __u16, and is the total length including data[]. */
struct xt_entry_match
{
	union {
		struct {
			__u16 match_size;

			/* Used by userspace */
			char name[XT_FUNCTION_MAXNAMELEN-1];

			__u8 revision;
		} user;
		struct {
			__u16 match_size;

			/* Used inside the kernel */
			struct xt_match *match;
		} kernel;

		/* Total length */
		__u16 match_size;
	} u;

	/* Match-specific payload follows (legacy zero-length array idiom;
	 * layout is ABI, do not modernize to a flexible array member here). */
	unsigned char data[0];
};
| 33 | |
/* Header preceding every target's payload inside a rule blob; same dual
 * userspace/kernel union scheme as struct xt_entry_match above, with the
 * kernel view holding the resolved struct xt_target pointer. */
struct xt_entry_target
{
	union {
		struct {
			__u16 target_size;

			/* Used by userspace */
			char name[XT_FUNCTION_MAXNAMELEN-1];

			__u8 revision;
		} user;
		struct {
			__u16 target_size;

			/* Used inside the kernel */
			struct xt_target *target;
		} kernel;

		/* Total length */
		__u16 target_size;
	} u;

	/* Target-specific payload follows (layout is ABI). */
	unsigned char data[0];
};
| 58 | |
/* Static initializer for an embedded xt_entry_target: fills the userspace
 * view with the given name and the XT_ALIGN()ed total size. */
#define XT_TARGET_INIT(__name, __size)					       \
{									       \
	.target.u.user = {						       \
		.target_size	= XT_ALIGN(__size),			       \
		.name		= __name,				       \
	},								       \
}
| 66 | |
/* The built-in "standard" target: its payload is a single verdict, which
 * is either an absolute verdict or a jump offset within the table. */
struct xt_standard_target
{
	struct xt_entry_target target;
	int verdict;
};
| 72 | |
/* The argument to IPT_SO_GET_REVISION_*. Returns highest revision
 * kernel supports, if >= revision. */
struct xt_get_revision
{
	char name[XT_FUNCTION_MAXNAMELEN-1];	/* extension name to query */

	__u8 revision;				/* minimum revision wanted */
};
| 81 | |
/* CONTINUE verdict for targets */
#define XT_CONTINUE 0xFFFFFFFF

/* For standard target */
/* NOTE(review): depends on NF_REPEAT from the netfilter core headers. */
#define XT_RETURN (-NF_REPEAT - 1)
| 87 | |
/* this is a dummy structure to find out the alignment requirement for a struct
 * containing all the fundamental data types that are used in ipt_entry,
 * ip6t_entry and arpt_entry. This sucks, and it is a hack. It will be my
 * personal pleasure to remove it -HW
 */
struct _xt_align
{
	__u8 u8;
	__u16 u16;
	__u32 u32;
	__u64 u64;
};

/* Round s up to the alignment derived from struct _xt_align above; used for
 * sizing the variable-length pieces of rule blobs. */
#define XT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) 	\
			& ~(__alignof__(struct _xt_align)-1))
Harald Welte | 2e4e6a1 | 2006-01-12 13:30:04 -0800 | [diff] [blame] | 103 | |
/* Standard return verdict, or do jump. */
#define XT_STANDARD_TARGET ""
/* Error verdict. */
#define XT_ERROR_TARGET "ERROR"

/* Set / accumulate the byte (b) and packet (p) counts of an xt_counters c. */
#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
| 111 | |
/* Per-rule traffic accounting. */
struct xt_counters
{
	__u64 pcnt, bcnt;			/* Packet and byte counters */
};
| 116 | |
/* The argument to IPT_SO_ADD_COUNTERS. */
struct xt_counters_info
{
	/* Which table. */
	char name[XT_TABLE_MAXNAMELEN];

	unsigned int num_counters;

	/* The counters (actually `number' of these). */
	struct xt_counters counters[0];
};

#define XT_INV_PROTO		0x40	/* Invert the sense of PROTO. */
| 130 | |
/* Walk every xt_entry_match packed between the fixed-size rule header (of
 * the given family-specific type, e.g. ipt_entry) and (e)->target_offset,
 * invoking fn(match, args...) on each.  fn returns 0 to continue iteration;
 * the first non-zero return stops the walk and becomes the value of the
 * whole expression.  The "e" argument is parenthesized at every use so
 * callers may pass arbitrary expressions (previously "(void *)e + __i"
 * bound the cast before any arithmetic inside e). */
#define XT_MATCH_ITERATE(type, e, fn, args...)	\
({						\
	unsigned int __i;			\
	int __ret = 0;				\
	struct xt_entry_match *__m;		\
						\
	for (__i = sizeof(type);		\
	     __i < (e)->target_offset;		\
	     __i += __m->u.match_size) {	\
		__m = (void *)(e) + __i;	\
						\
		__ret = fn(__m , ## args);	\
		if (__ret != 0)			\
			break;			\
	}					\
	__ret;					\
})
| 149 | |
/* Walk "size" bytes of rule entries starting at "entries", stepping by each
 * entry's next_offset and skipping the first n entries, invoking
 * fn(entry, args...) on the rest.  fn returns 0 to continue iteration; the
 * first non-zero return stops the walk and becomes the value of the whole
 * expression.  The "n" argument is parenthesized at its use for macro
 * hygiene (it previously appeared bare in the comparison). */
#define XT_ENTRY_ITERATE_CONTINUE(type, entries, size, n, fn, args...) \
({								\
	unsigned int __i, __n;					\
	int __ret = 0;						\
	type *__entry;						\
								\
	for (__i = 0, __n = 0; __i < (size);			\
	     __i += __entry->next_offset, __n++) {		\
		__entry = (void *)(entries) + __i;		\
		if (__n < (n))					\
			continue;				\
								\
		__ret = fn(__entry , ## args);			\
		if (__ret != 0)					\
			break;					\
	}							\
	__ret;							\
})
| 169 | |
/* fn returns 0 to continue iteration; convenience wrapper that starts the
 * walk at the first entry (skip count 0). */
#define XT_ENTRY_ITERATE(type, entries, size, fn, args...) \
	XT_ENTRY_ITERATE_CONTINUE(type, entries, size, 0, fn, args)
| 173 | |
Harald Welte | 2e4e6a1 | 2006-01-12 13:30:04 -0800 | [diff] [blame] | 174 | #ifdef __KERNEL__ |
| 175 | |
| 176 | #include <linux/netdevice.h> |
| 177 | |
/**
 * struct xt_match_param - parameters for match extensions' match functions
 *
 * @in: input netdevice
 * @out: output netdevice
 * @match: struct xt_match through which this function was invoked
 * @matchinfo: per-match data
 * @fragoff: packet is a fragment, this is the data offset
 * @thoff: position of transport header relative to skb->data
 * @hooknum: hook number given packet came from
 * @family: Actual NFPROTO_* through which the function is invoked
 * (helpful when match->family == NFPROTO_UNSPEC)
 * @hotdrop: drop packet if we had inspection problems
 */
struct xt_match_param {
	const struct net_device *in, *out;
	const struct xt_match *match;
	const void *matchinfo;
	int fragoff;
	unsigned int thoff;
	unsigned int hooknum;
	u_int8_t family;
	bool *hotdrop;
};
| 202 | |
/**
 * struct xt_mtchk_param - parameters for match extensions'
 * checkentry functions
 *
 * @table: table the rule is tried to be inserted into
 * @entryinfo: the family-specific rule data
 * (struct ipt_ip, ip6t_ip, ebt_entry)
 * @match: struct xt_match through which this function was invoked
 * @matchinfo: per-match data
 * @hook_mask: via which hooks the new rule is reachable
 * @family: NFPROTO_* family through which the check is invoked
 */
struct xt_mtchk_param {
	const char *table;
	const void *entryinfo;
	const struct xt_match *match;
	void *matchinfo;
	unsigned int hook_mask;
	u_int8_t family;
};
| 222 | |
/* Match destructor parameters */
struct xt_mtdtor_param {
	const struct xt_match *match;	/* extension being destroyed */
	void *matchinfo;		/* its per-match data */
	u_int8_t family;		/* NFPROTO_* of the invocation */
};
| 229 | |
/**
 * struct xt_target_param - parameters for target extensions' target functions
 *
 * @hooknum: hook through which this target was invoked
 * @target: struct xt_target through which this function was invoked
 * @targinfo: per-target data
 *
 * Other fields see struct xt_match_param above.
 */
struct xt_target_param {
	const struct net_device *in, *out;
	unsigned int hooknum;
	const struct xt_target *target;
	const void *targinfo;
	u_int8_t family;
};
| 246 | |
/**
 * struct xt_tgchk_param - parameters for target extensions'
 * checkentry functions
 *
 * @entryinfo: the family-specific rule data
 * (struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
 *
 * Other fields see struct xt_mtchk_param above.
 */
struct xt_tgchk_param {
	const char *table;
	const void *entryinfo;
	const struct xt_target *target;
	void *targinfo;
	unsigned int hook_mask;
	u_int8_t family;
};
| 264 | |
/* Target destructor parameters */
struct xt_tgdtor_param {
	const struct xt_target *target;	/* extension being destroyed */
	void *targinfo;			/* its per-target data */
	u_int8_t family;		/* NFPROTO_* of the invocation */
};
| 271 | |
/* A match extension, registered with xt_register_match() and looked up by
 * name/revision/family when rules are installed. */
struct xt_match
{
	struct list_head list;

	const char name[XT_FUNCTION_MAXNAMELEN-1];
	u_int8_t revision;

	/* Return true or false: return FALSE and set *hotdrop = 1 to
	   force immediate packet drop. */
	/* Arguments changed since 2.6.9, as this must now handle
	   non-linear skb, using skb_header_pointer and
	   skb_ip_make_writable. */
	bool (*match)(const struct sk_buff *skb,
		      const struct xt_match_param *);

	/* Called when user tries to insert an entry of this type. */
	bool (*checkentry)(const struct xt_mtchk_param *);

	/* Called when entry of this type deleted. */
	void (*destroy)(const struct xt_mtdtor_param *);

	/* Called when userspace align differs from kernel space one */
	void (*compat_from_user)(void *dst, void *src);
	int (*compat_to_user)(void __user *dst, void *src);

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	/* Free to use by each match */
	unsigned long data;

	/* NOTE(review): table/hooks/proto presumably constrain where rules
	 * using this match may appear — verify against xt_check_match(). */
	const char *table;
	unsigned int matchsize;
	unsigned int compatsize;
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
};
| 311 | |
/* Registration hooks for targets. */
/* A target extension, registered with xt_register_target(); mirrors
 * struct xt_match above but its handler returns a verdict. */
struct xt_target
{
	struct list_head list;

	const char name[XT_FUNCTION_MAXNAMELEN-1];

	/* Returns verdict. Argument order changed since 2.6.9, as this
	   must now handle non-linear skbs, using skb_copy_bits and
	   skb_ip_make_writable. */
	unsigned int (*target)(struct sk_buff *skb,
			       const struct xt_target_param *);

	/* Called when user tries to insert an entry of this type:
	   hook_mask is a bitmask of hooks from which it can be
	   called. */
	/* Should return true or false. */
	bool (*checkentry)(const struct xt_tgchk_param *);

	/* Called when entry of this type deleted. */
	void (*destroy)(const struct xt_tgdtor_param *);

	/* Called when userspace align differs from kernel space one */
	void (*compat_from_user)(void *dst, void *src);
	int (*compat_to_user)(void __user *dst, void *src);

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	/* NOTE(review): table/hooks/proto presumably constrain where rules
	 * using this target may appear — verify against xt_check_target(). */
	const char *table;
	unsigned int targetsize;
	unsigned int compatsize;
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
	u_int8_t revision;
};
| 350 | |
/* Furniture shopping... */
/* One rule table; the live rule blob hangs off ->private
 * (a struct xt_table_info, see below). */
struct xt_table
{
	struct list_head list;

	/* What hooks you will enter on */
	unsigned int valid_hooks;

	/* Man behind the curtain... */
	struct xt_table_info *private;

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	u_int8_t af;		/* address/protocol family */

	/* A unique name... */
	const char name[XT_TABLE_MAXNAMELEN];
};
| 370 | |
| 371 | #include <linux/netfilter_ipv4.h> |
| 372 | |
/* The table itself */
struct xt_table_info
{
	/* Size per table */
	unsigned int size;
	/* Number of entries: FIXME. --RR */
	unsigned int number;
	/* Initial number of entries. Needed for module usage count */
	unsigned int initial_entries;

	/* Entry points and underflows */
	unsigned int hook_entry[NF_INET_NUMHOOKS];
	unsigned int underflow[NF_INET_NUMHOOKS];

	/* ipt_entry tables: one per CPU */
	/* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
	void *entries[1];
};

/* Allocation size of struct xt_table_info: the header plus one entries[]
 * pointer per possible CPU (entries[1] above is only a placeholder). */
#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
			  + nr_cpu_ids * sizeof(char *))
/* (Un)registration of target and match extensions, singly or n at a time. */
extern int xt_register_target(struct xt_target *target);
extern void xt_unregister_target(struct xt_target *target);
extern int xt_register_targets(struct xt_target *target, unsigned int n);
extern void xt_unregister_targets(struct xt_target *target, unsigned int n);

extern int xt_register_match(struct xt_match *target);
extern void xt_unregister_match(struct xt_match *target);
extern int xt_register_matches(struct xt_match *match, unsigned int n);
extern void xt_unregister_matches(struct xt_match *match, unsigned int n);

/* Validate a rule's match/target at insertion time against the given
 * size/protocol (inv_proto inverts the protocol test). */
extern int xt_check_match(struct xt_mtchk_param *,
			  unsigned int size, u_int8_t proto, bool inv_proto);
extern int xt_check_target(struct xt_tgchk_param *,
			   unsigned int size, u_int8_t proto, bool inv_proto);
Patrick McHardy | 37f9f73 | 2006-03-20 17:59:06 -0800 | [diff] [blame] | 408 | |
/* Table lifetime: registration, unregistration, and atomic replacement of a
 * table's rule blob (xt_replace_table returns the old xt_table_info). */
extern struct xt_table *xt_register_table(struct net *net,
					  struct xt_table *table,
					  struct xt_table_info *bootstrap,
					  struct xt_table_info *newinfo);
extern void *xt_unregister_table(struct xt_table *table);

extern struct xt_table_info *xt_replace_table(struct xt_table *table,
					      unsigned int num_counters,
					      struct xt_table_info *newinfo,
					      int *error);

/* Extension lookup by (family, name, revision).  NOTE(review): the
 * "request_" variant presumably also attempts a module load — verify in
 * net/netfilter/x_tables.c. */
extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
extern struct xt_target *xt_request_find_target(u8 af, const char *name,
						u8 revision);
extern int xt_find_revision(u8 af, const char *name, u8 revision,
			    int target, int *err);

/* Find a table by name within a namespace/family; pair with
 * xt_table_unlock(). */
extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
					   const char *name);
extern void xt_table_unlock(struct xt_table *t);

/* Per-family setup/teardown of x_tables state for a network namespace. */
extern int xt_proto_init(struct net *net, u_int8_t af);
extern void xt_proto_fini(struct net *net, u_int8_t af);

/* Allocate/free the table blob holder described by XT_TABLE_INFO_SZ. */
extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
extern void xt_free_table_info(struct xt_table_info *info);
Stephen Hemminger | 942e4a2 | 2009-04-28 22:36:33 -0700 | [diff] [blame] | 436 | |
/*
 * Per-CPU spinlock associated with per-cpu table entries, and
 * with a counter for the "reading" side that allows a recursive
 * reader to avoid taking the lock and deadlocking.
 *
 * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu.
 * It needs to ensure that the rules are not being changed while the packet
 * is being processed. In some cases, the read lock will be acquired
 * twice on the same CPU; this is okay because of the count.
 *
 * "writing" is used when reading counters.
 * During replace any readers that are using the old tables have to complete
 * before freeing the old table. This is handled by the write locking
 * necessary for reading the counters.
 */
struct xt_info_lock {
	spinlock_t lock;
	unsigned char readers;	/* recursion depth of this CPU's readers */
};
DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
| 457 | |
/*
 * Note: we need to ensure that preemption is disabled before acquiring
 * the per-cpu-variable, so we do it as a two step process rather than
 * using "spin_lock_bh()".
 *
 * We _also_ need to disable bottom half processing before updating our
 * nesting count, to make sure that the only kind of re-entrancy is this
 * code being called by itself: since the count+lock is not an atomic
 * operation, we can allow no races.
 *
 * _Only_ that special combination of being per-cpu and never getting
 * re-entered asynchronously means that the count is safe.
 */
/* Take this CPU's reader lock; only the outermost reader (readers was 0)
 * actually acquires the spinlock. */
static inline void xt_info_rdlock_bh(void)
{
	struct xt_info_lock *lock;

	local_bh_disable();
	lock = &__get_cpu_var(xt_info_locks);
	if (likely(!lock->readers++))
		spin_lock(&lock->lock);
}
| 480 | |
/* Release the reader lock taken by xt_info_rdlock_bh() and re-enable
 * bottom halves; only the outermost reader drops the spinlock. */
static inline void xt_info_rdunlock_bh(void)
{
	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);

	if (likely(!--lock->readers))
		spin_unlock(&lock->lock);
	local_bh_enable();
}
| 489 | |
/*
 * The "writer" side needs to get exclusive access to the lock,
 * regardless of readers. This must be called with bottom half
 * processing (and thus also preemption) disabled.
 *
 * @cpu: which CPU's table lock to take exclusively.
 */
static inline void xt_info_wrlock(unsigned int cpu)
{
	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
}
| 499 | |
/* Release the exclusive per-cpu lock taken by xt_info_wrlock(). */
static inline void xt_info_wrunlock(unsigned int cpu)
{
	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
}
Harald Welte | 2e4e6a1 | 2006-01-12 13:30:04 -0800 | [diff] [blame] | 504 | |
Eric Dumazet | b8dfe49 | 2009-03-25 17:31:52 +0100 | [diff] [blame] | 505 | /* |
| 506 | * This helper is performance critical and must be inlined |
| 507 | */ |
| 508 | static inline unsigned long ifname_compare_aligned(const char *_a, |
| 509 | const char *_b, |
| 510 | const char *_mask) |
| 511 | { |
| 512 | const unsigned long *a = (const unsigned long *)_a; |
| 513 | const unsigned long *b = (const unsigned long *)_b; |
| 514 | const unsigned long *mask = (const unsigned long *)_mask; |
| 515 | unsigned long ret; |
| 516 | |
| 517 | ret = (a[0] ^ b[0]) & mask[0]; |
| 518 | if (IFNAMSIZ > sizeof(unsigned long)) |
| 519 | ret |= (a[1] ^ b[1]) & mask[1]; |
| 520 | if (IFNAMSIZ > 2 * sizeof(unsigned long)) |
| 521 | ret |= (a[2] ^ b[2]) & mask[2]; |
| 522 | if (IFNAMSIZ > 3 * sizeof(unsigned long)) |
| 523 | ret |= (a[3] ^ b[3]) & mask[3]; |
| 524 | BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); |
| 525 | return ret; |
| 526 | } |
| 527 | |
Dmitry Mishin | 2722971 | 2006-04-01 02:25:19 -0800 | [diff] [blame] | 528 | #ifdef CONFIG_COMPAT |
| 529 | #include <net/compat.h> |
| 530 | |
/* 32-bit userland layout of struct xt_entry_match: identical except the
 * kernel view holds a compat_uptr_t instead of a native pointer. */
struct compat_xt_entry_match
{
	union {
		struct {
			u_int16_t match_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t revision;
		} user;
		struct {
			u_int16_t match_size;
			compat_uptr_t match;
		} kernel;
		u_int16_t match_size;
	} u;
	unsigned char data[0];
};
| 547 | |
/* 32-bit userland layout of struct xt_entry_target: identical except the
 * kernel view holds a compat_uptr_t instead of a native pointer. */
struct compat_xt_entry_target
{
	union {
		struct {
			u_int16_t target_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t revision;
		} user;
		struct {
			u_int16_t target_size;
			compat_uptr_t target;
		} kernel;
		u_int16_t target_size;
	} u;
	unsigned char data[0];
};
| 564 | |
/* FIXME: this works only on 32 bit tasks
 * need to change whole approach in order to calculate align as function of
 * current task alignment */

/* Counter pair as laid out by 32-bit userland (16 bytes either way). */
struct compat_xt_counters
{
#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
	u_int32_t cnt[4];
#else
	u_int64_t cnt[2];
#endif
};

/* 32-bit userland layout of struct xt_counters_info. */
struct compat_xt_counters_info
{
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t num_counters;
	struct compat_xt_counters counters[0];
};

/* Round s up to the compat counter alignment (cf. XT_ALIGN above). */
#define COMPAT_XT_ALIGN(s) (((s) + (__alignof__(struct compat_xt_counters)-1)) \
			& ~(__alignof__(struct compat_xt_counters)-1))
| 587 | |
/* Per-family lock around compat translation state. */
extern void xt_compat_lock(u_int8_t af);
extern void xt_compat_unlock(u_int8_t af);

/* Record/replay per-offset size deltas between compat and native rule
 * layouts so jump offsets can be adjusted during translation. */
extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta);
extern void xt_compat_flush_offsets(u_int8_t af);
extern short xt_compat_calc_jump(u_int8_t af, unsigned int offset);

/* Size delta of one match between compat and native form, and the two
 * directions of translation. */
extern int xt_compat_match_offset(const struct xt_match *match);
extern int xt_compat_match_from_user(struct xt_entry_match *m,
				     void **dstptr, unsigned int *size);
extern int xt_compat_match_to_user(struct xt_entry_match *m,
				   void __user **dstptr, unsigned int *size);

/* Same as above, for targets. */
extern int xt_compat_target_offset(const struct xt_target *target);
extern void xt_compat_target_from_user(struct xt_entry_target *t,
				       void **dstptr, unsigned int *size);
extern int xt_compat_target_to_user(struct xt_entry_target *t,
				    void __user **dstptr, unsigned int *size);
Dmitry Mishin | 2722971 | 2006-04-01 02:25:19 -0800 | [diff] [blame] | 606 | |
| 607 | #endif /* CONFIG_COMPAT */ |
Harald Welte | 2e4e6a1 | 2006-01-12 13:30:04 -0800 | [diff] [blame] | 608 | #endif /* __KERNEL__ */ |
| 609 | |
| 610 | #endif /* _X_TABLES_H */ |