#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	int			nqueues;
	struct list_head	lru_list;
	spinlock_t		lru_lock;

	/* The percpu_counter "mem" needs to be cacheline aligned.
	 * mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter	mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};
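
/* A per-namespace instance of this struct is filled in by each protocol
 * before calling inet_frags_init_net().  A sketch modelled on the IPv4
 * side of this era; the function names and threshold values are taken
 * from net/ipv4/ip_fragment.c, not defined in this header, and are shown
 * for illustration only:
 *
 *	static int __net_init ipv4_frags_init_net(struct net *net)
 *	{
 *		net->ipv4.frags.high_thresh = 256 * 1024;
 *		net->ipv4.frags.low_thresh  = 192 * 1024;
 *		net->ipv4.frags.timeout     = IP_FRAG_TIME;
 *		inet_frags_init_net(&net->ipv4.frags);
 *		return ip4_frags_ns_ctl_register(net);
 *	}
 */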

struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;		/* when will this queue expire? */
	struct list_head	lru_list;	/* lru list member */
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments;	/* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;		/* total length of orig datagram */
	int			meat;		/* bytes of fragment data received so far */
	__u8			last_in;	/* first/last segment arrived? */

#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1

	u16			max_size;	/* largest fragment size seen on this queue */

	struct netns_frags	*net;
};
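
/* Reassembly is typically considered complete once both the first and the
 * last fragment have arrived and the accumulated payload ("meat") covers
 * the whole original datagram.  A minimal sketch of that check, assuming
 * the caller holds q->lock (reassemble() stands in for the protocol's
 * reassembly function, e.g. ip_frag_reasm() for IPv4):
 *
 *	if (q->last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 *	    q->meat == q->len)
 *		return reassemble(q);
 */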

#define INETFRAGS_HASHSZ	1024

/* averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       rounded up (SKB_TRUESIZE(0) + sizeof(struct ipq or
 *	       struct frag_queue))
 */
#define INETFRAGS_MAXDEPTH	128

struct inet_frag_bucket {
	struct hlist_head	chain;
	spinlock_t		chain_lock;
};

struct inet_frags {
	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). Important to keep this on a separate cacheline.
	 * It is primarily a rebuild-protection rwlock.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;
	int			secret_interval;
	struct timer_list	secret_timer;

	/* The first call to hashfn is responsible for initializing
	 * rnd. This is best done with net_get_random_once.
	 */
	u32			rnd;
	int			qsize;	/* size of the protocol-specific queue struct */

	unsigned int		(*hashfn)(struct inet_frag_queue *);
	bool			(*match)(struct inet_frag_queue *q, void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
};
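
/* Each reassembly user (IPv4, IPv6, and the netfilter IPv6 defrag code)
 * owns one struct inet_frags, fills in the callbacks and qsize, and then
 * calls inet_frags_init().  A sketch modelled on the IPv4 side; all of
 * the callback names belong to net/ipv4/ip_fragment.c, not to this header:
 *
 *	static struct inet_frags ip4_frags;
 *
 *	void __init ipfrag_init(void)
 *	{
 *		ip4_frags.hashfn = ip4_hashfn;
 *		ip4_frags.constructor = ip4_frag_init;
 *		ip4_frags.destructor = ip4_frag_free;
 *		ip4_frags.skb_free = NULL;
 *		ip4_frags.qsize = sizeof(struct ipq);
 *		ip4_frags.match = ip4_frag_match;
 *		ip4_frags.frag_expire = ip_expire;
 *		ip4_frags.secret_interval = 10 * 60 * HZ;
 *		inet_frags_init(&ip4_frags);
 *	}
 */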

void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q,
				struct inet_frags *f, int *work);
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock);
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f, NULL);
}
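
/* Queues are reference counted: inet_frag_find() returns a queue with a
 * reference held and, per the __releases() annotation above, drops the
 * hash rwlock the caller took for the lookup.  A hedged sketch of the
 * lifecycle as used on the IPv4 side; error handling is simplified and
 * the surrounding names are not part of this header:
 *
 *	read_lock(&ip4_frags.lock);
 *	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 *	... queue the fragment under q->lock ...
 *	inet_frag_put(q, &ip4_frags);	// drop the lookup reference
 */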

/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
}

static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	percpu_counter_init(&nf->mem, 0);
}

static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
	int res;

	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}
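
/* Accounting follows skb truesize: a protocol charges memory as fragments
 * are queued and releases it when the queue is reassembled or destroyed.
 * A minimal sketch (the enqueue and teardown paths live in the protocol
 * code, not here, and are only assumed for illustration):
 *
 *	// on fragment arrival, after linking skb into q->fragments
 *	add_frag_mem_limit(q, skb->truesize);
 *
 *	// when tearing the queue down or consuming its fragments
 *	sub_frag_mem_limit(q, skb->truesize);
 *
 * frag_mem_limit() gives a cheap, possibly stale per-cpu view that is good
 * enough for the high/low threshold checks, while sum_frag_mem_limit()
 * does the exact (and more expensive) sum, e.g. for reporting.
 */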

static inline void inet_frag_lru_move(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	if (!list_empty(&q->lru_list))
		list_move_tail(&q->lru_list, &q->net->lru_list);
	spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_del(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	list_del_init(&q->lru_list);
	q->net->nqueues--;
	spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_add(struct netns_frags *nf,
				     struct inet_frag_queue *q)
{
	spin_lock(&nf->lru_lock);
	list_add_tail(&q->lru_list, &nf->lru_list);
	q->net->nqueues++;
	spin_unlock(&nf->lru_lock);
}
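
/* The LRU list orders queues from oldest to most recently updated: a queue
 * is added on creation, moved to the tail whenever a new fragment lands on
 * it, and deleted when it is killed.  Under memory pressure the evictor
 * frees queues from the head until usage drops back toward low_thresh.  A
 * simplified sketch of that idea only; the real inet_frag_evictor() in
 * net/ipv4/inet_fragment.c also handles the force flag and work budgeting:
 *
 *	while (frag_mem_limit(nf) > nf->low_thresh) {
 *		spin_lock(&nf->lru_lock);
 *		if (list_empty(&nf->lru_list)) {
 *			spin_unlock(&nf->lru_lock);
 *			break;
 *		}
 *		q = list_first_entry(&nf->lru_list,
 *				     struct inet_frag_queue, lru_list);
 *		atomic_inc(&q->refcnt);
 *		spin_unlock(&nf->lru_lock);
 *
 *		spin_lock(&q->lock);
 *		if (!(q->last_in & INET_FRAG_COMPLETE))
 *			inet_frag_kill(q, f);
 *		spin_unlock(&q->lock);
 *		inet_frag_put(q, f);
 *	}
 */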

/* RFC 3168 support:
 * We want to check the ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR of the ip4_frag_ecn() values of all received fragments.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];
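
/* Each fragment's ECN codepoint is folded into one of the four bits above,
 * and ip_frag_ecn_table[] maps the accumulated OR back to a final ECN value
 * (or a drop decision) at reassembly time.  A hedged sketch of how the
 * consumers use it; the ip4_frag_ecn() helper and the 0xff sentinel follow
 * the IPv4 reassembly code rather than anything defined in this header:
 *
 *	// per fragment: NOT_ECT -> 0x01, ECT_1 -> 0x02, ECT_0 -> 0x04, CE -> 0x08
 *	qp->ecn |= ip4_frag_ecn(ip_hdr(skb)->tos);
 *
 *	// at reassembly time
 *	ecn = ip_frag_ecn_table[qp->ecn];
 *	if (ecn == 0xff)
 *		goto drop;	// invalid mix, e.g. NOT_ECT together with CE
 *	ip_hdr(head)->tos |= ecn;
 */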

#endif