blob: 54c1de781c681a63309bb96c6f27afba5308f950 [file] [log] [blame]
Pavel Emelyanov5ab11c92007-10-15 02:24:19 -07001#ifndef __NET_FRAG_H__
2#define __NET_FRAG_H__
3
/*
 * Per-network-namespace fragment reassembly state: live queue count,
 * LRU list of queues, memory accounting, and the namespace-local
 * sysctl knobs.
 */
struct netns_frags {
	int			nqueues;	/* number of frag queues in this netns */
	struct list_head	lru_list;	/* queues, least-recently-used first */

	/* It's important for performance to keep lru_list and mem on
	 * separate cachelines
	 */
	atomic_t		mem ____cacheline_aligned_in_smp;	/* memory used by all queues */
	/* sysctls */
	int			timeout;	/* reassembly timeout — presumably jiffies; confirm at users */
	int			high_thresh;	/* mem level that triggers eviction */
	int			low_thresh;	/* mem level eviction shrinks down to — TODO confirm */
};
17
/*
 * One in-flight datagram being reassembled from its fragments.
 * Protocol-specific queue structs embed this as their first member
 * (see the qsize/constructor hooks in struct inet_frags).
 */
struct inet_frag_queue {
	struct hlist_node	list;		/* hash-bucket linkage */
	struct netns_frags	*net;		/* owning namespace state */
	struct list_head	lru_list;	/* lru list member */
	spinlock_t		lock;		/* protects this queue */
	atomic_t		refcnt;
	struct timer_list	timer;		/* when will this queue expire? */
	struct sk_buff		*fragments;	/* list of received fragments */
	struct sk_buff		*fragments_tail;	/* tail of the above, for O(1) append */
	ktime_t			stamp;		/* timestamp — presumably of last fragment; confirm */
	int			len;		/* total length of orig datagram */
	int			meat;		/* NOTE(review): looks like bytes gathered so far — confirm */
	__u8			last_in;	/* first/last segment arrived? */

/* Bit flags for last_in above */
#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1

	u16			max_size;	/* largest fragment size seen — TODO confirm against callers */
};
38
#define INETFRAGS_HASHSZ	64

/*
 * Per-protocol reassembly descriptor, shared across namespaces: the
 * hash table of inet_frag_queue objects plus the protocol-specific
 * callbacks (hashing, matching, construction, teardown, expiry).
 */
struct inet_frags {
	struct hlist_head	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). Important to keep this on a separate cacheline.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;
	int			secret_interval;	/* period of secret_timer — TODO confirm units */
	struct timer_list	secret_timer;		/* presumably rehashes with a fresh rnd; verify */
	u32			rnd;			/* seed fed into hashfn — TODO confirm */
	int			qsize;			/* size of the protocol's queue struct — confirm */

	/* hash an existing queue into hash[] */
	unsigned int		(*hashfn)(struct inet_frag_queue *);
	/* does queue q correspond to lookup key arg? */
	bool			(*match)(struct inet_frag_queue *q, void *arg);
	/* initialize the protocol-specific part of a new queue from arg */
	void			(*constructor)(struct inet_frag_queue *q,
					       void *arg);
	/* release protocol-specific queue state */
	void			(*destructor)(struct inet_frag_queue *);
	/* free one fragment skb */
	void			(*skb_free)(struct sk_buff *);
	/* timer callback run when a queue's reassembly timer fires */
	void			(*frag_expire)(unsigned long data);
};
60
/* Register / unregister a protocol's reassembly descriptor. */
void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

/* Per-namespace setup / teardown of fragment state. */
void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

/* Unhash q and stop its timer so it can no longer be found. */
void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
/* Free q and its fragments; *work, if non-NULL, presumably tracks
 * memory released for the evictor — TODO confirm at call sites. */
void inet_frag_destroy(struct inet_frag_queue *q,
				struct inet_frags *f, int *work);
/* Evict queues from nf; force ignores thresholds — TODO confirm. */
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
/* Find (or create) the queue matching key in bucket hash.
 * Releases f->lock before returning (per the sparse annotation). */
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock);
Pavel Emelyanov277e6502007-10-15 02:37:18 -070074
Pavel Emelyanov762cc402007-10-15 02:41:56 -070075static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
76{
77 if (atomic_dec_and_test(&q->refcnt))
78 inet_frag_destroy(q, f, NULL);
79}
80
Pavel Emelyanov5ab11c92007-10-15 02:24:19 -070081#endif