#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	int			nqueues;
	struct list_head	lru_list;

	/* The percpu_counter "mem" needs to be cacheline aligned.
	 * mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter	mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};
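
/*
 * Illustrative sketch (not part of this header): the three sysctls above
 * drive the eviction policy. One would expect eviction to start once the
 * accounted memory (see frag_mem_limit() below) exceeds high_thresh and
 * to stop once it drops back below low_thresh, roughly:
 *
 *	if (!force && frag_mem_limit(nf) <= nf->high_thresh)
 *		return 0;
 *	while (frag_mem_limit(nf) > nf->low_thresh)
 *		evict the oldest queue on nf->lru_list;
 *
 * The authoritative logic is inet_frag_evictor(), declared below.
 */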

struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;      /* when will this queue expire? */
	struct list_head	lru_list;   /* lru list member */
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments; /* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;        /* total length of orig datagram */
	int			meat;
	__u8			last_in;    /* first/last segment arrived? */

#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1

	u16			max_size;

	struct netns_frags	*net;
};
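
/*
 * Illustrative sketch (not part of this header): a reassembler typically
 * considers the datagram complete once both the first and the last
 * fragment have arrived and the accumulated payload ("meat") covers the
 * original length, along the lines of:
 *
 *	if (q->last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 *	    q->meat == q->len)
 *		reassemble q->fragments;
 *
 * INET_FRAG_COMPLETE marks a queue that has been completed or killed.
 */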

#define INETFRAGS_HASHSZ		64

struct inet_frags {
	struct hlist_head	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). Important to keep this on a separate cacheline.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;
	int			secret_interval;
	struct timer_list	secret_timer;
	u32			rnd;
	int			qsize;

	unsigned int		(*hashfn)(struct inet_frag_queue *);
	bool			(*match)(struct inet_frag_queue *q, void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
};
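
/*
 * Illustrative sketch (not part of this header), modelled on the IPv4
 * reassembly setup; exact callback names vary per protocol and kernel
 * version:
 *
 *	static struct inet_frags ip4_frags;
 *
 *	void __init ipfrag_init(void)
 *	{
 *		ip4_frags.hashfn = ip4_hashfn;
 *		ip4_frags.match = ip4_frag_match;
 *		ip4_frags.constructor = ip4_frag_init;
 *		ip4_frags.destructor = ip4_frag_free;
 *		ip4_frags.skb_free = NULL;
 *		ip4_frags.qsize = sizeof(struct ipq);
 *		ip4_frags.frag_expire = ip_expire;
 *		ip4_frags.secret_interval = 10 * 60 * HZ;
 *		inet_frags_init(&ip4_frags);
 *	}
 *
 * qsize is the full allocation size of the protocol's queue container,
 * which embeds struct inet_frag_queue as its first member.
 */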

void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q,
				struct inet_frags *f, int *work);
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock);

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f, NULL);
}
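
/*
 * Illustrative sketch (not part of this header): inet_frag_find() is
 * called with f->lock read-held, drops it on return (hence the
 * __releases annotation above), and returns a queue with its refcnt
 * elevated, so every successful lookup must be paired with
 * inet_frag_put():
 *
 *	read_lock(&f->lock);
 *	q = inet_frag_find(nf, f, &key, hash);
 *	if (q) {
 *		process the queue under q->lock;
 *		inet_frag_put(q, f);
 *	}
 */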

/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;
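
/*
 * Note: with a batch this large, percpu_counter_read() may deviate from
 * the true sum by up to the batch size per CPU; that imprecision is the
 * price of cheap fast-path updates. percpu_counter_sum_positive() folds
 * in the per-cpu deltas when an accurate value is required (see
 * sum_frag_mem_limit() below).
 */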

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
}

static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	percpu_counter_init(&nf->mem, 0);
}

static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_sum_positive(&nf->mem);
}
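
/*
 * Illustrative sketch (not part of this header): the helpers above are
 * meant to bracket a fragment's lifetime, roughly:
 *
 *	add_frag_mem_limit(q, skb->truesize);	when queueing a fragment
 *	sub_frag_mem_limit(q, skb->truesize);	when the skb is consumed
 *	sub_frag_mem_limit(q, f->qsize);	when the queue is destroyed
 *
 * frag_mem_limit() is then compared against the high/low thresholds in
 * struct netns_frags to decide when to evict.
 */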

#endif