#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	int			nqueues;
	struct list_head	lru_list;
	spinlock_t		lru_lock;

	/* The percpu_counter "mem" needs to be cacheline aligned.
	 * mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter	mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};
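/* In struct netns_frags above, timeout is the reassembly timeout in jiffies,
 * while high_thresh and low_thresh bound the memory that incomplete datagrams
 * may tie up in a namespace: once usage crosses high_thresh the evictor frees
 * the oldest queues until it drops back below low_thresh.  For IPv4 these are
 * exposed as the ipfrag_time, ipfrag_high_thresh and ipfrag_low_thresh
 * sysctls.
 */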

struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;      /* when will this queue expire? */
	struct list_head	lru_list;   /* lru list member */
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments; /* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;        /* total length of orig datagram */
	int			meat;
	__u8			last_in;    /* first/last segment arrived? */

#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1

	u16			max_size;

	struct netns_frags	*net;
};
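/* A queue is complete once both INET_FRAG_FIRST_IN and INET_FRAG_LAST_IN have
 * been set and every byte of the original datagram has arrived, i.e. when
 * meat == len.  A minimal sketch of the check a reassembler typically makes
 * (reassemble_and_deliver() is a hypothetical placeholder):
 *
 *	if (q->last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 *	    q->meat == q->len)
 *		return reassemble_and_deliver(q);
 */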

#define INETFRAGS_HASHSZ		64

struct inet_frags {
	struct hlist_head	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). It is important to keep it on a separate cacheline.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;
	int			secret_interval;
	struct timer_list	secret_timer;
	u32			rnd;
	int			qsize;

	unsigned int		(*hashfn)(struct inet_frag_queue *);
	bool			(*match)(struct inet_frag_queue *q, void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
};
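/* A reassembly protocol embeds struct inet_frag_queue at the start of its own
 * queue structure and describes itself to the core through struct inet_frags.
 * A rough sketch of the registration, modelled on the IPv4 reassembler; the
 * my_* names are illustrative placeholders, not defined in this header:
 *
 *	struct my_frag_queue {
 *		struct inet_frag_queue q;	(must stay first)
 *		...protocol specific lookup keys...
 *	};
 *
 *	static struct inet_frags my_frags;
 *
 *	my_frags.hashfn		 = my_hashfn;
 *	my_frags.match		 = my_match;
 *	my_frags.constructor	 = my_frag_init;
 *	my_frags.destructor	 = my_frag_free;
 *	my_frags.skb_free	 = NULL;
 *	my_frags.qsize		 = sizeof(struct my_frag_queue);
 *	my_frags.frag_expire	 = my_expire;
 *	my_frags.secret_interval = 10 * 60 * HZ;
 *	inet_frags_init(&my_frags);
 */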

void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q,
				struct inet_frags *f, int *work);
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock);
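/* Typical lookup path, a hedged sketch only (my_hash(), my_key and the
 * my_frags_ns member are placeholders): the caller takes f->lock for reading,
 * hashes its key with f->rnd folded in, and calls inet_frag_find(), which
 * returns the matching queue (creating one if necessary) with an elevated
 * refcount and with f->lock already dropped, as the __releases() annotation
 * above documents.
 *
 *	read_lock(&my_frags.lock);
 *	hash = my_hash(&my_key, my_frags.rnd);
 *	q = inet_frag_find(&net->my_frags_ns, &my_frags, &my_key, hash);
 *	if (q == NULL)
 *		goto drop;
 *	spin_lock(&q->lock);
 *	...queue the fragment, update q->meat, q->len and q->last_in...
 *	spin_unlock(&q->lock);
 *	inet_frag_put(q, &my_frags);
 */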

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f, NULL);
}

/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;
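/* frag_mem_limit() below is the fast path: percpu_counter_read() only looks
 * at the central count, so the value may lag by up to the batch size per CPU,
 * which is acceptable for threshold checks.  sum_frag_mem_limit() folds in
 * every per-cpu delta and is exact but more expensive.  add/sub use the large
 * frag_percpu_counter_batch so that per-fragment updates (typically
 * add_frag_mem_limit(q, skb->truesize) when a fragment is queued) rarely
 * touch the shared counter.
 */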

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
}

static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	percpu_counter_init(&nf->mem, 0);
}

static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_sum_positive(&nf->mem);
}

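/* The helpers below maintain the per-namespace LRU of reassembly queues that
 * the evictor walks from the head: the core adds a queue with
 * inet_frag_lru_add() when it is created, moves it to the tail with
 * inet_frag_lru_move() whenever a new fragment arrives, and removes it with
 * inet_frag_lru_del() when the queue is killed, so the oldest idle queues are
 * the first ones inet_frag_evictor() frees under memory pressure.
 */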
static inline void inet_frag_lru_move(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	list_move_tail(&q->lru_list, &q->net->lru_list);
	spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_del(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	list_del(&q->lru_list);
	spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_add(struct netns_frags *nf,
				     struct inet_frag_queue *q)
{
	spin_lock(&nf->lru_lock);
	list_add_tail(&q->lru_list, &nf->lru_list);
	spin_unlock(&nf->lru_lock);
}
#endif