#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	/* The percpu_counter "mem" needs to be cacheline aligned.
	 * mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter	mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};

/**
 * fragment queue flags
 *
 * @INET_FRAG_FIRST_IN: first fragment has arrived
 * @INET_FRAG_LAST_IN: final fragment has arrived
 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
 */
enum {
	INET_FRAG_FIRST_IN	= BIT(0),
	INET_FRAG_LAST_IN	= BIT(1),
	INET_FRAG_COMPLETE	= BIT(2),
};

/**
 * struct inet_frag_queue - fragment queue
 *
 * @lock: spinlock protecting the queue
 * @timer: queue expiration timer
 * @list: hash bucket list
 * @refcnt: reference count of the queue
 * @fragments: received fragments head
 * @fragments_tail: received fragments tail
 * @stamp: timestamp of the last received fragment
 * @len: total length of the original datagram
 * @meat: length of received fragments so far
 * @flags: fragment queue flags
 * @max_size: maximum received fragment size
 * @net: namespace that this frag belongs to
 * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
 */
struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments;
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;
	int			meat;
	__u8			flags;
	u16			max_size;
	struct netns_frags	*net;
	struct hlist_node	list_evictor;
};

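/* Illustrative sketch, not part of this header (the helper name is
 * hypothetical): reassembly code such as net/ipv4/ip_fragment.c treats a
 * datagram as complete once both terminal fragments have been seen and
 * the bytes received so far (@meat) cover the whole original datagram
 * (@len).
 */
static inline bool inet_frag_queue_is_complete_sketch(const struct inet_frag_queue *q)
{
	return q->flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	       q->meat == q->len;
}
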
#define INETFRAGS_HASHSZ	1024

/* averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       rounded-up per-queue cost (SKB_TRUESIZE(0) +
 *	       sizeof(struct ipq or struct frag_queue))
 */
#define INETFRAGS_MAXDEPTH	128

struct inet_frag_bucket {
	struct hlist_head	chain;
	spinlock_t		chain_lock;
};

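/* Illustrative sketch (hypothetical helper): each bucket carries its own
 * chain_lock, so inserting or removing a queue only serializes against
 * the single chain it hashes to, not against the whole table.
 */
static inline void inet_frag_bucket_insert_sketch(struct inet_frag_bucket *hb,
						  struct inet_frag_queue *q)
{
	spin_lock(&hb->chain_lock);
	hlist_add_head(&q->list, &hb->chain);
	spin_unlock(&hb->chain_lock);
}
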
struct inet_frags {
	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];

	struct work_struct	frags_work;
	unsigned int		next_bucket;
	unsigned long		last_rebuild_jiffies;
	bool			rebuild;

	/* The first call to hashfn is responsible for initializing
	 * rnd. This is best done with net_get_random_once.
	 *
	 * rnd_seqlock lets hash insertion detect when it needs to
	 * re-look up the hash chain to use.
	 */
	u32			rnd;
	seqlock_t		rnd_seqlock;
	int			qsize;

	unsigned int		(*hashfn)(const struct inet_frag_queue *);
	bool			(*match)(const struct inet_frag_queue *q,
					 const void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       const void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
	struct kmem_cache	*frags_cachep;
	const char		*frags_cache_name;
};

int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

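/* Illustrative sketch, loosely modelled on ipfrag_init() in
 * net/ipv4/ip_fragment.c (the ip4_* names belong to that file and are
 * shown here only as an example): a protocol fills in its callbacks and
 * per-queue size, then registers the descriptor once at boot.
 *
 *	ip4_frags.hashfn	= ip4_hashfn;
 *	ip4_frags.constructor	= ip4_frag_init;
 *	ip4_frags.destructor	= ip4_frag_free;
 *	ip4_frags.qsize		= sizeof(struct ipq);
 *	ip4_frags.match		= ip4_frag_match;
 *	ip4_frags.frag_expire	= ip_expire;
 *	ip4_frags.frags_cache_name = ip_frag_cache_name;
 *	if (inet_frags_init(&ip4_frags))
 *		panic("IP: failed to allocate ip4_frags cache\n");
 */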
static inline int inet_frags_init_net(struct netns_frags *nf)
{
	return percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
}

static inline void inet_frags_uninit_net(struct netns_frags *nf)
{
	percpu_counter_destroy(&nf->mem);
}

void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

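/* Illustrative sketch (hypothetical pernet hooks; the threshold values
 * mirror the usual ipv4 defaults but are assumptions here): a namespace
 * sets its sysctl defaults and initializes the percpu counter when the
 * namespace is created, and releases everything on teardown.
 *
 *	static int __net_init example_frags_init_net(struct net *net)
 *	{
 *		net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
 *		net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
 *		net->ipv4.frags.timeout     = IP_FRAG_TIME;
 *		return inet_frags_init_net(&net->ipv4.frags);
 *	}
 *
 *	static void __net_exit example_frags_exit_net(struct net *net)
 *	{
 *		inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
 *	}
 */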
void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f);
}

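/* Illustrative caller sketch, patterned after ip_find() in
 * net/ipv4/ip_fragment.c: inet_frag_find() returns a referenced queue,
 * or an ERR_PTR() when a hash chain exceeds INETFRAGS_MAXDEPTH, so
 * callers check with IS_ERR_OR_NULL() (from <linux/err.h>) and balance
 * the reference with inet_frag_put() when done.
 *
 *	q = inet_frag_find(nf, f, &arg, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, __func__);
 *		return NULL;
 *	}
 *	spin_lock(&q->lock);
 *	... queue the fragment, update meat/len/flags ...
 *	spin_unlock(&q->lock);
 *	inet_frag_put(q, f);
 */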
static inline bool inet_frag_evicting(struct inet_frag_queue *q)
{
	return !hlist_unhashed(&q->list_evictor);
}

/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation memory accounting sizes.
 * The memory cost of a full 64K datagram is approximately:
 * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
{
	__percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
{
	__percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
}

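/* Illustrative usage (hypothetical snippet): each queued skb is
 * accounted by its truesize when linked into a queue, and the same
 * amount is given back when the fragment is freed or the datagram is
 * reassembled.
 *
 *	add_frag_mem_limit(q->net, skb->truesize);
 *	...
 *	sub_frag_mem_limit(q->net, skb->truesize);
 */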
static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
{
	unsigned int res;

	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}

/* RFC 3168 support:
 * We want to check the ECN values of all fragments to detect invalid
 * combinations. In ipq->ecn we store the OR of the ip4_frag_ecn() value
 * of each fragment.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];

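/* Illustrative sketch (patterned after the reassembly paths in
 * net/ipv4/ip_fragment.c; the 0xff convention comes from the table's
 * definition there): the OR-accumulated per-fragment flags index the
 * table, which yields either the ECN codepoint to set on the
 * reassembled packet or 0xff for an invalid combination, in which case
 * the datagram is dropped.
 *
 *	ecn = ip_frag_ecn_table[qp->ecn];
 *	if (unlikely(ecn == 0xff)) {
 *		err = -EINVAL;
 *		goto out_fail;
 *	}
 */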
#endif