blob: 9ee46314b7d76df47d683c252a92ce97398d592b [file] [log] [blame]
Pavel Emelyanov2787b042012-08-13 05:49:39 +00001#ifndef __PACKET_INTERNAL_H__
2#define __PACKET_INTERNAL_H__
3
/*
 * One entry in a packet socket's membership list (see mclist in
 * struct packet_sock below).  Singly linked via @next.
 */
struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;	/* device this membership applies to */
	int			count;		/* NOTE(review): presumably a refcount for duplicate adds — confirm */
	unsigned short		type;
	unsigned short		alen;		/* number of valid bytes in @addr */
	unsigned char		addr[MAX_ADDR_LEN];
};
12
/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;			/* vector of block descriptors */
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;	/* index of the block currently being filled */
	unsigned short	blk_sizeof_priv;	/* size of the per-block private area */

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;		/* start of the current block */
	char		*pkblk_end;		/* end of the current block */
	int		kblk_size;		/* size of one block */
	unsigned int	max_frame_len;		/* NOTE(review): presumably the largest frame that fits in a block — confirm */
	unsigned int	knum_blocks;		/* number of blocks in the ring */
	uint64_t	knxt_seq_num;		/* sequence number for the next block */
	char		*prev;
	char		*nxt_offset;		/* write position for the next frame within the block */
	struct sk_buff	*skb;

	atomic_t	blk_fill_in_prog;	/* count of writers currently filling the block */

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;		/* block retire timeout, in ms (see default above) */
	unsigned short	version;
	unsigned long	tov_in_jiffies;		/* NOTE(review): looks like retire_blk_tov converted to jiffies — confirm */

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};
51
/* One page-vector entry: a pointer to a single memory chunk of a ring. */
struct pgv {
	char *buffer;
};
55
/*
 * One mmap()ed packet ring (used for both rx_ring and tx_ring in
 * struct packet_sock below).
 */
struct packet_ring_buffer {
	struct pgv		*pg_vec;	/* vector of memory chunks backing the ring */

	unsigned int		head;		/* current frame index — NOTE(review): confirm producer vs consumer role */
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;	/* NOTE(review): presumably highest valid frame index — confirm */

	unsigned int		pg_vec_order;	/* NOTE(review): presumably page allocation order per chunk — confirm */
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;	/* number of chunks in @pg_vec */

	unsigned int __percpu	*pending_refcnt;	/* NOTE(review): per-cpu count of in-flight frames — confirm */

	struct tpacket_kbdq_core prb_bdqc;	/* block-descriptor-queue state (kbdq, see above) */
};
72
Pavel Emelyanovfff33212012-08-16 05:36:48 +000073extern struct mutex fanout_mutex;
74#define PACKET_FANOUT_MAX 256
75
/*
 * A fanout group: a set of packet sockets sharing one prot_hook, with
 * incoming packets distributed to one member according to @type.
 */
struct packet_fanout {
	possible_net_t		net;		/* owning network namespace */
	unsigned int		num_members;	/* number of live entries in @arr */
	u16			id;		/* group identifier — NOTE(review): presumably user-chosen; confirm */
	u8			type;		/* distribution mode — NOTE(review): presumably a PACKET_FANOUT_* value */
	u8			flags;
	union {					/* per-mode state */
		atomic_t		rr_cur;		/* round-robin cursor */
		struct bpf_prog __rcu	*bpf_prog;	/* BPF program selecting the member */
	};
	struct list_head	list;
	struct sock		*arr[PACKET_FANOUT_MAX];	/* member sockets */
	spinlock_t		lock;
	atomic_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
92
Willem de Bruijn0648ab72015-05-12 11:56:46 -040093struct packet_rollover {
94 int sock;
Willem de Bruijn59f21112015-06-16 12:51:37 -040095 struct rcu_head rcu;
Willem de Bruijna9b63912015-05-12 11:56:50 -040096 atomic_long_t num;
97 atomic_long_t num_huge;
98 atomic_long_t num_failed;
Willem de Bruijn3b3a5b02015-05-12 11:56:49 -040099#define ROLLOVER_HLEN (L1_CACHE_BYTES / sizeof(u32))
100 u32 history[ROLLOVER_HLEN] ____cacheline_aligned;
Willem de Bruijn0648ab72015-05-12 11:56:46 -0400101} ____cacheline_aligned_in_smp;
102
Pavel Emelyanov2787b042012-08-13 05:49:39 +0000103struct packet_sock {
104 /* struct sock has to be the first member of packet_sock */
105 struct sock sk;
106 struct packet_fanout *fanout;
Daniel Borkmannee80fbf2013-04-19 06:12:29 +0000107 union tpacket_stats_u stats;
Pavel Emelyanov2787b042012-08-13 05:49:39 +0000108 struct packet_ring_buffer rx_ring;
109 struct packet_ring_buffer tx_ring;
110 int copy_thresh;
111 spinlock_t bind_lock;
112 struct mutex pg_vec_lock;
113 unsigned int running:1, /* prot_hook is attached*/
114 auxdata:1,
115 origdev:1,
116 has_vnet_hdr:1;
Willem de Bruijn2ccdbaa2015-05-12 11:56:48 -0400117 int pressure;
Pavel Emelyanov2787b042012-08-13 05:49:39 +0000118 int ifindex; /* bound device */
119 __be16 num;
Willem de Bruijn0648ab72015-05-12 11:56:46 -0400120 struct packet_rollover *rollover;
Pavel Emelyanov2787b042012-08-13 05:49:39 +0000121 struct packet_mclist *mclist;
122 atomic_t mapped;
123 enum tpacket_versions tp_version;
124 unsigned int tp_hdrlen;
125 unsigned int tp_reserve;
126 unsigned int tp_loss:1;
Paul Chavent5920cd3a2012-11-06 23:10:47 +0000127 unsigned int tp_tx_has_off:1;
Pavel Emelyanov2787b042012-08-13 05:49:39 +0000128 unsigned int tp_tstamp;
Daniel Borkmanne40526c2013-11-21 16:50:58 +0100129 struct net_device __rcu *cached_dev;
Daniel Borkmannd346a3f2013-12-06 11:36:17 +0100130 int (*xmit)(struct sk_buff *skb);
Pavel Emelyanov2787b042012-08-13 05:49:39 +0000131 struct packet_type prot_hook ____cacheline_aligned_in_smp;
132};
133
/*
 * Convert a generic struct sock pointer to its containing packet_sock.
 *
 * The cast is valid because struct sock is required to be the first
 * member of struct packet_sock (see the comment in the struct), so both
 * pointers share the same address.
 *
 * Marked inline: a plain `static` function defined in a header emits a
 * "defined but not used" warning in every translation unit that includes
 * the header without calling it; `static inline` is the standard idiom
 * for header-defined helpers.
 */
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}
138
139#endif