/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */
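
/*
 * Rough usage sketch (how the reassembly protocols are expected to
 * drive this code; the ordering below is illustrative, not mandated
 * by this file):
 *
 *	- fill a struct inet_frags with the per-protocol callbacks
 *	  (hashfn, match, constructor, destructor, skb_free, frag_expire)
 *	  plus qsize and secret_interval, then call inet_frags_init();
 *	- call inet_frags_init_net() for every network namespace;
 *	- look queues up with inet_frag_find() while holding f->lock
 *	  for reading; it returns a referenced queue, creating one if
 *	  none matches;
 *	- when reassembly completes or times out, call inet_frag_kill()
 *	  and drop the reference with inet_frag_put().
 */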

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/inet_frag.h>
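
/*
 * Timer callback: pick a new random seed for the hash function and
 * relink every queue whose bucket changed onto its new hash chain.
 * Runs periodically from f->secret_timer so the hash stays hard to
 * predict for remote senders.
 */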
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	write_lock(&f->lock);
	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_queue *q;
		struct hlist_node *p, *n;

		hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->list, &f->hash[hval]);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}
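
/*
 * Protocol-wide initialisation: set up the hash buckets and lock,
 * seed the hash secret and arm the periodic secret-rebuild timer.
 * Called once per reassembly protocol (ipv4, ipv6, nf conntrack).
 */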
void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		INIT_HLIST_HEAD(&f->hash[i]);

	rwlock_init(&f->lock);

	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
			(jiffies ^ (jiffies >> 6)));

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);
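
/*
 * Per-namespace initialisation: no queues yet, no memory accounted,
 * empty LRU list.
 */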
void inet_frags_init_net(struct netns_frags *nf)
{
	nf->nqueues = 0;
	atomic_set(&nf->mem, 0);
	INIT_LIST_HEAD(&nf->lru_list);
}
EXPORT_SYMBOL(inet_frags_init_net);
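
/* Protocol-wide teardown: stop the secret-rebuild timer. */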
void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);
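
/*
 * Per-namespace teardown: drop the low threshold to zero so the
 * evictor below releases every queue still held by this namespace.
 */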
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	local_bh_disable();
	inet_frag_evictor(nf, f);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_frags_exit_net);
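
/* Remove a queue from the hash and LRU lists under the hash lock. */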
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	write_lock(&f->lock);
	hlist_del(&fq->list);
	list_del(&fq->lru_list);
	fq->net->nqueues--;
	write_unlock(&f->lock);
}
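
/*
 * Stop a queue's expire timer and unlink it from the hash, dropping
 * the references they held.  The queue is marked INET_FRAG_COMPLETE
 * so this happens only once; the memory is freed later, when the
 * last reference goes away.
 */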
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);
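
/*
 * Free one fragment skb, crediting the namespace memory counter and,
 * when the caller is the evictor, its remaining work budget.
 */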
static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
		struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;

	atomic_sub(skb->truesize, &nf->mem);
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}
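
/*
 * Final destruction of a queue whose refcount reached zero: free all
 * queued fragments, give the memory back to the namespace counter and
 * let the protocol's destructor release any private state.
 */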
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
					int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(nf, f, fp, work);
		fp = xp;
	}

	if (work)
		*work -= f->qsize;
	atomic_sub(f->qsize, &nf->mem);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);
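
/*
 * Reclaim memory by killing least-recently-used queues from the head
 * of the namespace LRU list until its memory use drops below
 * nf->low_thresh.  Returns the number of queues evicted.
 */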
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	work = atomic_read(&nf->mem) - nf->low_thresh;
	while (work > 0) {
		read_lock(&f->lock);
		if (list_empty(&nf->lru_list)) {
			read_unlock(&f->lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		read_unlock(&f->lock);

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);
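
/*
 * Insert a freshly allocated queue into the hash.  The hash value is
 * recomputed under the write lock because the secret may have changed
 * since the caller looked it up; on SMP another CPU may have created
 * a matching queue in the meantime, in which case that queue is
 * returned and qp_in is dropped.
 */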
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_queue *qp;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif
	unsigned int hash;

	write_lock(&f->lock);
	/*
	 * While we were without the lock another CPU could have updated
	 * the rnd seed, so we need to re-calculate the hash chain.
	 * Fortunately, qp_in can be used to get one.
	 */
	hash = f->hashfn(qp_in);
#ifdef CONFIG_SMP
	/* With the SMP race we have to recheck the hash table, because
	 * such an entry could have been created on another CPU while we
	 * promoted the read lock to a write lock.
	 */
	hlist_for_each_entry(qp, n, &f->hash[hash], list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			write_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &f->hash[hash]);
	list_add_tail(&qp->lru_list, &nf->lru_list);
	nf->nqueues++;
	write_unlock(&f->lock);
	return qp;
}
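
/*
 * Allocate and initialise a queue of the protocol's size (f->qsize),
 * charge it to the namespace memory counter and let the protocol's
 * constructor fill in the lookup key from @arg.
 */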
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	f->constructor(q, arg);
	atomic_add(f->qsize, &nf->mem);
	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);
	q->net = nf;

	return q;
}
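
/* Allocate a new queue and insert it into the hash. */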
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}
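
/*
 * Look up the queue matching @key in the given hash bucket, taking a
 * reference on it, or create a new one if none exists.  Called with
 * f->lock read-held; the lock is released before returning.
 */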
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_queue *q;
	struct hlist_node *n;

	hlist_for_each_entry(q, n, &f->hash[hash], list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			read_unlock(&f->lock);
			return q;
		}
	}
	read_unlock(&f->lock);

	return inet_frag_create(nf, f, key);
}
EXPORT_SYMBOL(inet_frag_find);