#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

Pavel Emelyanovac18e752008-01-22 06:02:14 -08004struct netns_frags {
Pavel Emelyanove5a2bb82008-01-22 06:06:23 -08005 int nqueues;
Pavel Emelyanov3140c252008-01-22 06:11:48 -08006 struct list_head lru_list;
Pavel Emelyanovb2fd5322008-01-22 06:09:37 -08007
Jesper Dangaard Brouercd39a782013-01-28 23:44:14 +00008 /* Its important for performance to keep lru_list and mem on
9 * separate cachelines
10 */
11 atomic_t mem ____cacheline_aligned_in_smp;
Pavel Emelyanovb2fd5322008-01-22 06:09:37 -080012 /* sysctls */
13 int timeout;
Pavel Emelyanove31e0bdc72008-01-22 06:10:13 -080014 int high_thresh;
15 int low_thresh;
Pavel Emelyanovac18e752008-01-22 06:02:14 -080016};
17
Pavel Emelyanov5ab11c92007-10-15 02:24:19 -070018struct inet_frag_queue {
Pavel Emelyanov5ab11c92007-10-15 02:24:19 -070019 spinlock_t lock;
Pavel Emelyanov5ab11c92007-10-15 02:24:19 -070020 struct timer_list timer; /* when will this queue expire? */
Jesper Dangaard Brouer6e34a8b2013-01-28 23:44:49 +000021 struct list_head lru_list; /* lru list member */
22 struct hlist_node list;
23 atomic_t refcnt;
Pavel Emelyanov5ab11c92007-10-15 02:24:19 -070024 struct sk_buff *fragments; /* list of received fragments */
Changli Gaod6bebca2010-06-29 04:39:37 +000025 struct sk_buff *fragments_tail;
Pavel Emelyanov5ab11c92007-10-15 02:24:19 -070026 ktime_t stamp;
27 int len; /* total length of orig datagram */
28 int meat;
29 __u8 last_in; /* first/last segment arrived? */
30
Joe Perchesbc578a52008-03-28 16:35:27 -070031#define INET_FRAG_COMPLETE 4
32#define INET_FRAG_FIRST_IN 2
33#define INET_FRAG_LAST_IN 1
Patrick McHardy5f2d04f2012-08-26 19:13:55 +020034
35 u16 max_size;
Jesper Dangaard Brouer6e34a8b2013-01-28 23:44:49 +000036
37 struct netns_frags *net;
Pavel Emelyanov5ab11c92007-10-15 02:24:19 -070038};
39
Pavel Emelyanov7eb95152007-10-15 02:31:52 -070040#define INETFRAGS_HASHSZ 64
41
42struct inet_frags {
Pavel Emelyanov7eb95152007-10-15 02:31:52 -070043 struct hlist_head hash[INETFRAGS_HASHSZ];
Jesper Dangaard Brouer5f8e1e82013-01-28 23:44:37 +000044 /* This rwlock is a global lock (seperate per IPv4, IPv6 and
45 * netfilter). Important to keep this on a seperate cacheline.
46 */
47 rwlock_t lock ____cacheline_aligned_in_smp;
Pavel Emelyanov3b4bc4a2008-01-22 06:11:04 -080048 int secret_interval;
Pavel Emelyanov7eb95152007-10-15 02:31:52 -070049 struct timer_list secret_timer;
Jesper Dangaard Brouer5f8e1e82013-01-28 23:44:37 +000050 u32 rnd;
51 int qsize;
Pavel Emelyanov321a3a92007-10-15 02:38:08 -070052
53 unsigned int (*hashfn)(struct inet_frag_queue *);
Jesper Dangaard Brouer5f8e1e82013-01-28 23:44:37 +000054 bool (*match)(struct inet_frag_queue *q, void *arg);
Pavel Emelyanovc6fda282007-10-17 19:46:47 -070055 void (*constructor)(struct inet_frag_queue *q,
56 void *arg);
Pavel Emelyanov1e4b8282007-10-15 02:39:14 -070057 void (*destructor)(struct inet_frag_queue *);
58 void (*skb_free)(struct sk_buff *);
Pavel Emelyanove521db92007-10-17 19:45:23 -070059 void (*frag_expire)(unsigned long data);
Pavel Emelyanov7eb95152007-10-15 02:31:52 -070060};
61
62void inet_frags_init(struct inet_frags *);
63void inet_frags_fini(struct inet_frags *);
64
Pavel Emelyanove5a2bb82008-01-22 06:06:23 -080065void inet_frags_init_net(struct netns_frags *nf);
Pavel Emelyanov81566e82008-01-22 06:12:39 -080066void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
Pavel Emelyanove5a2bb82008-01-22 06:06:23 -080067
Pavel Emelyanov277e6502007-10-15 02:37:18 -070068void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
Pavel Emelyanov1e4b8282007-10-15 02:39:14 -070069void inet_frag_destroy(struct inet_frag_queue *q,
70 struct inet_frags *f, int *work);
Amerigo Wang6b102862012-09-18 16:50:11 +000071int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
Pavel Emelyanovac18e752008-01-22 06:02:14 -080072struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
Hannes Eder56bca312009-02-25 10:32:52 +000073 struct inet_frags *f, void *key, unsigned int hash)
74 __releases(&f->lock);
Pavel Emelyanov277e6502007-10-15 02:37:18 -070075
Pavel Emelyanov762cc402007-10-15 02:41:56 -070076static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
77{
78 if (atomic_dec_and_test(&q->refcnt))
79 inet_frag_destroy(q, f, NULL);
80}
81
#endif /* __NET_FRAG_H__ */