Pavel Emelyanov | 5ab11c9 | 2007-10-15 02:24:19 -0700 | [diff] [blame] | 1 | #ifndef __NET_FRAG_H__ |
| 2 | #define __NET_FRAG_H__ |
| 3 | |
Eric Dumazet | 23ce9c5 | 2018-10-10 12:29:56 -0700 | [diff] [blame] | 4 | #include <linux/rhashtable.h> |
| 5 | |
/* Per-network-namespace IP fragment reassembly state: sysctl-tunable
 * limits, the protocol descriptor, the hash table of in-flight
 * reassembly queues, and the memory-usage counter.
 */
struct netns_frags {
	/* sysctls */
	long			high_thresh;	/* upper memory limit; see frag_mem_limit() */
	long			low_thresh;	/* lower memory watermark — presumably the
						 * eviction target; confirm in inet_fragment.c */
	int			timeout;	/* queue lifetime before expiry (jiffies — TODO confirm) */
	int			max_dist;	/* max allowed fragment disorder (0 = unlimited?) — verify */
	struct inet_frags	*f;		/* protocol-specific descriptor (IPv4/IPv6/...) */

	/* Hash table of struct inet_frag_queue, keyed per rhash_params in *f. */
	struct rhashtable	rhashtable ____cacheline_aligned_in_smp;

	/* Keep atomic mem on separate cachelines in structs that include it */
	atomic_long_t		mem ____cacheline_aligned_in_smp;
};
| 19 | |
/**
 * fragment queue flags
 *
 * @INET_FRAG_FIRST_IN: first fragment has arrived
 * @INET_FRAG_LAST_IN: final fragment has arrived
 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
 */
enum {
	INET_FRAG_FIRST_IN	= BIT(0),
	INET_FRAG_LAST_IN	= BIT(1),
	INET_FRAG_COMPLETE	= BIT(2),
};
| 32 | |
/* rhashtable lookup key for IPv4 reassembly queues (inet_frag_queue.key.v4).
 * NOTE(review): field order/packing is likely significant for hashing —
 * do not reorder without checking rhash_params in the protocol code.
 */
struct frag_v4_compare_key {
	__be32		saddr;		/* source address */
	__be32		daddr;		/* destination address */
	u32		user;		/* reassembly context (defrag user) */
	u32		vif;		/* multicast VIF — presumably; confirm at callers */
	__be16		id;		/* IP header identification field */
	u16		protocol;	/* IP header protocol field */
};
| 41 | |
/* rhashtable lookup key for IPv6 reassembly queues (inet_frag_queue.key.v6).
 * NOTE(review): field order/packing is likely significant for hashing —
 * do not reorder without checking rhash_params in the protocol code.
 */
struct frag_v6_compare_key {
	struct in6_addr	saddr;		/* source address */
	struct in6_addr	daddr;		/* destination address */
	u32		user;		/* reassembly context (defrag user) */
	__be32		id;		/* fragment header identification */
	u32		iif;		/* incoming interface index — presumably; verify */
};
| 49 | |
/**
 * struct inet_frag_queue - fragment queue
 *
 * @node: rhash node
 * @key: keys identifying this frag.
 * @timer: queue expiration timer
 * @lock: spinlock protecting this frag
 * @refcnt: reference count of the queue
 * @fragments: received fragments head
 * @rb_fragments: received fragments rb-tree root
 * @fragments_tail: received fragments tail
 * @last_run_head: the head of the last "run". see ip_fragment.c
 * @stamp: timestamp of the last received fragment
 * @len: total length of the original datagram
 * @meat: length of received fragments so far
 * @flags: fragment queue flags
 * @max_size: maximum received fragment size
 * @net: namespace that this frag belongs to
 * @rcu: rcu head for freeing deferral
 */
struct inet_frag_queue {
	struct rhash_head	node;
	union {
		struct frag_v4_compare_key v4;
		struct frag_v6_compare_key v6;
	} key;
	struct timer_list	timer;
	spinlock_t		lock;
	atomic_t		refcnt;
	struct sk_buff		*fragments;	/* used in 6lowpan IPv6. */
	struct rb_root		rb_fragments;	/* Used in IPv4/IPv6. */
	struct sk_buff		*fragments_tail;
	struct sk_buff		*last_run_head;
	ktime_t			stamp;
	int			len;
	int			meat;
	__u8			flags;		/* INET_FRAG_* bits */
	u16			max_size;
	struct netns_frags	*net;
	struct rcu_head		rcu;
};
| 91 | |
/* Protocol descriptor shared by all namespaces: sizes, callbacks and hash
 * parameters for one fragment-reassembly user (IPv4, IPv6, netfilter, ...).
 */
struct inet_frags {
	/* Size of the protocol's queue structure — presumably the object
	 * size for frags_cachep; confirm in inet_fragment.c.
	 */
	int			qsize;

	/* Initialize the protocol-private part of a new queue; @arg is
	 * likely the lookup key passed to inet_frag_find() — verify.
	 */
	void			(*constructor)(struct inet_frag_queue *q,
					       const void *arg);
	/* Release protocol-private resources when a queue is destroyed. */
	void			(*destructor)(struct inet_frag_queue *);
	/* Expiration callback wired to inet_frag_queue.timer. */
	void			(*frag_expire)(unsigned long data);
	struct kmem_cache	*frags_cachep;		/* slab cache for queues */
	const char		*frags_cache_name;	/* name for the cache above */
	struct rhashtable_params rhash_params;		/* per-protocol key/hash setup */
};
| 103 | |
/* One-time registration / teardown of a protocol's inet_frags descriptor. */
int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);
| 106 | |
Eric Dumazet | 7fca771 | 2018-10-10 12:29:49 -0700 | [diff] [blame] | 107 | static inline int inet_frags_init_net(struct netns_frags *nf) |
Eric Dumazet | 1d6119b | 2015-11-02 09:03:11 -0800 | [diff] [blame] | 108 | { |
Eric Dumazet | 7f61706 | 2018-10-10 12:30:00 -0700 | [diff] [blame] | 109 | atomic_long_set(&nf->mem, 0); |
Eric Dumazet | 23ce9c5 | 2018-10-10 12:29:56 -0700 | [diff] [blame] | 110 | return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params); |
Eric Dumazet | 1d6119b | 2015-11-02 09:03:11 -0800 | [diff] [blame] | 111 | } |
/* Release the per-namespace state set up by inet_frags_init_net(). */
void inet_frags_exit_net(struct netns_frags *nf);

/* NOTE(review): semantics inferred from names — confirm in inet_fragment.c */
void inet_frag_kill(struct inet_frag_queue *q);		/* take queue out of service */
void inet_frag_destroy(struct inet_frag_queue *q);	/* free queue and its fragments */
/* Look up (or create) the queue matching @key within namespace @nf. */
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);

/* Free all skbs in the queue; return the sum of their truesizes. */
unsigned int inet_frag_rbtree_purge(struct rb_root *root);
| 120 | |
Eric Dumazet | 2ffb1c3 | 2018-10-10 12:29:50 -0700 | [diff] [blame] | 121 | static inline void inet_frag_put(struct inet_frag_queue *q) |
Pavel Emelyanov | 762cc40 | 2007-10-15 02:41:56 -0700 | [diff] [blame] | 122 | { |
| 123 | if (atomic_dec_and_test(&q->refcnt)) |
Eric Dumazet | 2ffb1c3 | 2018-10-10 12:29:50 -0700 | [diff] [blame] | 124 | inet_frag_destroy(q); |
Pavel Emelyanov | 762cc40 | 2007-10-15 02:41:56 -0700 | [diff] [blame] | 125 | } |
| 126 | |
/* Memory Tracking Functions. */

/* Current reassembly memory accounted to @nf, in the units that callers
 * pass to add_frag_mem_limit() (skb truesize bytes — TODO confirm).
 */
static inline long frag_mem_limit(const struct netns_frags *nf)
{
	return atomic_long_read(&nf->mem);
}
| 133 | |
/* Subtract @val from the namespace's fragment memory counter. */
static inline void sub_frag_mem_limit(struct netns_frags *nf, long val)
{
	atomic_long_sub(val, &nf->mem);
}
| 138 | |
/* Add @val to the namespace's fragment memory counter. */
static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
{
	atomic_long_add(val, &nf->mem);
}
| 143 | |
/* RFC 3168 support :
 * We want to check ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

/* Indexed by the OR of the four IPFRAG_ECN_* bits above (hence 16 entries). */
extern const u8 ip_frag_ecn_table[16];
| 154 | |
/* Return values of inet_frag_queue_insert() */
#define IPFRAG_OK	0	/* fragment accepted */
#define IPFRAG_DUP	1	/* duplicate of data already queued */
#define IPFRAG_OVERLAP	2	/* overlaps previously queued data */
/* Insert @skb, covering datagram bytes [offset, end), into @q's rb-tree. */
int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
			   int offset, int end);
/* Reassembly helpers; exact contracts live in the implementation —
 * NOTE(review): see inet_fragment.c before relying on return semantics.
 */
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
			      struct sk_buff *parent);
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
			    void *reasm_data);
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
| 166 | |
Pavel Emelyanov | 5ab11c9 | 2007-10-15 02:24:19 -0700 | [diff] [blame] | 167 | #endif |