/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * 		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/inet_frag.h>
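
/*
 * Timer callback: pick a new random value for f->rnd and move every
 * queue whose bucket changes under the new f->hashfn() result.
 * Periodically reseeding the hash keeps chain placement unpredictable
 * to remote senders; the timer rearms itself at the end.
 */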
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	write_lock(&f->lock);
	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_queue *q;
		struct hlist_node *p, *n;

		hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->list, &f->hash[hval]);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->ctl->secret_interval);
}
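
/*
 * Called once by each reassembly protocol (IPv4, IPv6, nf conntrack)
 * to set up its hash table, LRU list, memory accounting and the
 * secret rotation timer.
 */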
void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		INIT_HLIST_HEAD(&f->hash[i]);

	INIT_LIST_HEAD(&f->lru_list);
	rwlock_init(&f->lock);

	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
				   (jiffies ^ (jiffies >> 6)));

	f->nqueues = 0;
	atomic_set(&f->mem, 0);

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->ctl->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);
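
/* Undo inet_frags_init(); only the secret timer needs stopping. */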
void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);
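
/*
 * Remove a queue from its hash chain and the LRU list and drop the
 * table's queue count.  Takes f->lock itself, so callers must not
 * already hold it.
 */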
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	write_lock(&f->lock);
	hlist_del(&fq->list);
	list_del(&fq->lru_list);
	f->nqueues--;
	write_unlock(&f->lock);
}
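
/*
 * Take a queue out of service: stop the expiry timer (releasing the
 * timer's reference if it was still pending), unlink the queue from
 * the table and set COMPLETE so no further fragments are accepted.
 * Callers serialize on the queue spinlock, as inet_frag_evictor()
 * does below.
 */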
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= COMPLETE;
	}
}

EXPORT_SYMBOL(inet_frag_kill);
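
/*
 * Free one fragment skb, let the protocol release any per-skb state
 * via f->skb_free, and credit the skb's truesize back to f->mem and,
 * when evicting, to the remaining work budget.
 */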
static inline void frag_kfree_skb(struct inet_frags *f, struct sk_buff *skb,
		int *work)
{
	if (work)
		*work -= skb->truesize;

	atomic_sub(skb->truesize, &f->mem);
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}
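
/*
 * Final teardown, called once the last reference is gone: the queue
 * must already be COMPLETE with its timer stopped.  Frees every
 * queued fragment, uncharges the per-queue f->qsize footprint, runs
 * the protocol destructor and frees the queue itself.
 */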
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
					int *work)
{
	struct sk_buff *fp;

	BUG_TRAP(q->last_in & COMPLETE);
	BUG_TRAP(del_timer(&q->timer) == 0);

	/* Release all fragment data. */
	fp = q->fragments;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(f, fp, work);
		fp = xp;
	}

	if (work)
		*work -= f->qsize;
	atomic_sub(f->qsize, &f->mem);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);
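
/*
 * Reclaim memory in LRU order until f->mem falls back under the
 * configured low threshold; the free routines above shrink the work
 * budget as they release memory.  Returns the number of queues
 * evicted so the caller can account the drops.
 */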
int inet_frag_evictor(struct inet_frags *f)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	work = atomic_read(&f->mem) - f->ctl->low_thresh;
	while (work > 0) {
		read_lock(&f->lock);
		if (list_empty(&f->lru_list)) {
			read_unlock(&f->lock);
			break;
		}

		q = list_first_entry(&f->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		read_unlock(&f->lock);

		spin_lock(&q->lock);
		if (!(q->last_in & COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);
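
/*
 * Publish a freshly allocated queue in bucket @hash.  On SMP the
 * chain is rechecked under the write lock: another CPU may have
 * inserted a matching queue while we were allocating, in which case
 * the duplicate is marked COMPLETE and released and the existing
 * queue is returned.  Otherwise the new queue gains references for
 * the pending timer and the hash chain on top of the caller's
 * initial one.
 */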
static struct inet_frag_queue *inet_frag_intern(struct inet_frag_queue *qp_in,
		struct inet_frags *f, unsigned int hash, void *arg)
{
	struct inet_frag_queue *qp;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif

	write_lock(&f->lock);
#ifdef CONFIG_SMP
	/* With SMP race we have to recheck hash table, because
	 * such entry could be created on other cpu, while we
	 * promoted read lock to write lock.
	 */
	hlist_for_each_entry(qp, n, &f->hash[hash], list) {
		if (f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			write_unlock(&f->lock);
			qp_in->last_in |= COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + f->ctl->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &f->hash[hash]);
	list_add_tail(&qp->lru_list, &f->lru_list);
	f->nqueues++;
	write_unlock(&f->lock);
	return qp;
}
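
/*
 * Allocate one queue of f->qsize bytes (the protocol's own struct,
 * which embeds an inet_frag_queue at the front), charge it against
 * f->mem and let the protocol constructor fill in the match keys
 * from @arg.  GFP_ATOMIC, since reassembly runs in softirq context.
 */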
static struct inet_frag_queue *inet_frag_alloc(struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	f->constructor(q, arg);
	atomic_add(f->qsize, &f->mem);
	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);

	return q;
}
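
/*
 * Slow path of inet_frag_find(): build a new queue for @arg and
 * publish it in bucket @hash, deferring to any queue another CPU
 * inserted meanwhile.
 */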
static struct inet_frag_queue *inet_frag_create(struct inet_frags *f,
		void *arg, unsigned int hash)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(q, f, hash, arg);
}
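
/*
 * Look up the queue matching @key in bucket @hash, taking a reference
 * on it, or create one when none exists; NULL only means allocation
 * failure.  Protocols embed struct inet_frag_queue at the start of
 * their own queue type and map the result back with container_of().
 * A minimal caller sketch (names borrowed from ipv4/ip_fragment.c,
 * shown for illustration):
 *
 *	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 *	q = inet_frag_find(&ip4_frags, &arg, hash);
 *	if (q == NULL)
 *		goto out_nomem;
 *	qp = container_of(q, struct ipq, q);
 */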
struct inet_frag_queue *inet_frag_find(struct inet_frags *f, void *key,
		unsigned int hash)
{
	struct inet_frag_queue *q;
	struct hlist_node *n;

	read_lock(&f->lock);
	hlist_for_each_entry(q, n, &f->hash[hash], list) {
		if (f->match(q, key)) {
			atomic_inc(&q->refcnt);
			read_unlock(&f->lock);
			return q;
		}
	}
	read_unlock(&f->lock);

	return inet_frag_create(f, key, hash);
}
EXPORT_SYMBOL(inet_frag_find);