blob: c035d263c1e8d119267633971920106b3bf627f3 [file] [log] [blame]
Pavel Emelyanov2787b042012-08-13 05:49:39 +00001#ifndef __PACKET_INTERNAL_H__
2#define __PACKET_INTERNAL_H__
3
/*
 * packet_mclist - per-socket record of a device (multicast) address binding.
 *
 * Singly linked list hung off packet_sock::mclist; presumably one entry per
 * PACKET_ADD_MEMBERSHIP request -- confirm against af_packet.c, which owns
 * the list operations.
 */
struct packet_mclist {
	struct packet_mclist	*next;		/* next entry; NULL terminates the list */
	int			ifindex;	/* index of the device the address was added on */
	int			count;		/* NOTE(review): looks like a refcount for duplicate adds -- confirm */
	unsigned short		type;		/* membership/address type */
	unsigned short		alen;		/* number of valid bytes in addr[] */
	unsigned char		addr[MAX_ADDR_LEN];	/* hardware address being joined */
};
12
/*
 * kbdq - kernel block descriptor queue.
 *
 * Per-ring bookkeeping for the block-oriented (TPACKET_V3-style) receive
 * ring: tracks the currently open block, where the next frame goes inside
 * it, and the timer that force-retires a block so user space is not left
 * waiting indefinitely on a partially filled one.
 */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;			/* page vector backing the blocks */
	unsigned int	feature_req_word;	/* feature bits requested by user space -- TODO confirm semantics */
	unsigned int	hdrlen;			/* header length in use for this ring */
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;	/* flag: tear down retire_blk_timer */
	unsigned short	kactive_blk_num;	/* block currently being filled by the kernel */
	unsigned short	blk_sizeof_priv;	/* bytes of per-block private area */

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;		/* start of the current block */
	char		*pkblk_end;		/* one past the end of the current block */
	int		kblk_size;		/* size of one block, in bytes */
	unsigned int	max_frame_len;		/* largest frame that fits in a block */
	unsigned int	knum_blocks;		/* total number of blocks in the ring */
	uint64_t	knxt_seq_num;		/* next sequence number to stamp -- TODO confirm */
	char		*prev;			/* previously written frame -- TODO confirm */
	char		*nxt_offset;		/* where the next frame will be copied */
	struct sk_buff	*skb;			/* skb currently being processed -- TODO confirm */

	atomic_t	blk_fill_in_prog;	/* nonzero while a writer is filling the block */

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;		/* block retire timeout (ms) */
	unsigned short	version;		/* ring/protocol version in effect */
	unsigned long	tov_in_jiffies;		/* retire_blk_tov converted to jiffies */

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};
51
/*
 * pgv - one element of a ring's page vector: the kernel address of a
 * single buffer (block) making up the ring.
 */
struct pgv {
	char *buffer;
};
55
/*
 * packet_ring_buffer - one mmap()ed packet ring (used for both rx_ring
 * and tx_ring in struct packet_sock).
 */
struct packet_ring_buffer {
	struct pgv		*pg_vec;	/* vector of block buffers backing the ring */

	unsigned int		head;		/* current frame position in the ring */
	unsigned int		frames_per_block;
	unsigned int		frame_size;	/* bytes per frame */
	unsigned int		frame_max;	/* highest valid frame index -- TODO confirm (max vs. count) */

	unsigned int		pg_vec_order;	/* page allocation order of each block */
	unsigned int		pg_vec_pages;	/* pages per block */
	unsigned int		pg_vec_len;	/* number of entries in pg_vec */

	unsigned int __percpu	*pending_refcnt;	/* per-cpu count of in-flight tx frames -- TODO confirm */

	struct tpacket_kbdq_core prb_bdqc;	/* block descriptor queue state (TPACKET_V3 rx) */
};
72
/* Serializes fanout group creation/teardown; presumably guards a global
 * fanout list defined in af_packet.c -- confirm at the definition site. */
extern struct mutex fanout_mutex;
/* Maximum sockets per fanout group; sizes packet_fanout::arr below. */
#define PACKET_FANOUT_MAX	256
75
/*
 * packet_fanout - a group of packet sockets that share (fan out) incoming
 * traffic. One prot_hook is registered for the whole group and demuxes to
 * the member sockets in arr[].
 */
struct packet_fanout {
	possible_net_t		net;		/* owning network namespace */
	unsigned int		num_members;	/* current number of sockets in arr[] */
	u16			id;		/* group id chosen by user space */
	u8			type;		/* fanout mode (hash, lb, ...) -- values defined elsewhere */
	u8			flags;
	atomic_t		rr_cur;		/* round-robin cursor -- TODO confirm only used by rr mode */
	struct list_head	list;		/* linkage on the global fanout list */
	struct sock		*arr[PACKET_FANOUT_MAX];	/* member sockets */
	spinlock_t		lock;		/* protects membership changes -- TODO confirm */
	atomic_t		sk_ref;		/* references held by member sockets */
	/* Own cacheline: the hot receive-path hook must not false-share
	 * with the mostly-read fields above. */
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
89
/*
 * packet_rollover - per-socket state for fanout rollover (migrating
 * traffic to another group member when this socket's ring is congested).
 */
struct packet_rollover {
	int			sock;		/* last member index rolled over to -- TODO confirm */
	atomic_long_t		num;		/* total rollover events */
	atomic_long_t		num_huge;	/* rollover stat -- presumably "huge" flows; confirm in af_packet.c */
	atomic_long_t		num_failed;	/* rollovers that found no room anywhere */
#define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(u32))
	/* Small recent-history window, sized to exactly one cacheline. */
	u32			history[ROLLOVER_HLEN] ____cacheline_aligned;
} ____cacheline_aligned_in_smp;
98
/*
 * packet_sock - AF_PACKET protocol-private socket state.
 *
 * Embeds struct sock as its first member so a struct sock pointer can be
 * cast directly to a packet_sock (see pkt_sk() below).
 */
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;	/* fanout group, or NULL */
	union tpacket_stats_u	stats;		/* rx statistics (version-dependent layout) */
	struct packet_ring_buffer rx_ring;	/* mmap()ed receive ring (if set up) */
	struct packet_ring_buffer tx_ring;	/* mmap()ed transmit ring (if set up) */
	int			copy_thresh;
	spinlock_t		bind_lock;	/* protects binding/prot_hook -- TODO confirm */
	struct mutex		pg_vec_lock;	/* serializes ring setup/teardown -- TODO confirm */
	unsigned int		running:1,	/* prot_hook is attached*/
				auxdata:1,	/* PACKET_AUXDATA enabled */
				origdev:1,	/* PACKET_ORIGDEV enabled */
				has_vnet_hdr:1;	/* PACKET_VNET_HDR enabled */
	int			pressure;	/* ring congestion indicator -- TODO confirm */
	int			ifindex;	/* bound device */
	__be16			num;		/* bound protocol number (network byte order) */
	struct packet_rollover	*rollover;	/* fanout rollover state, or NULL */
	struct packet_mclist	*mclist;	/* list of joined device addresses */
	atomic_t		mapped;		/* count of active ring mmaps -- TODO confirm */
	enum tpacket_versions	tp_version;	/* TPACKET_V1/V2/V3 */
	unsigned int		tp_hdrlen;	/* frame header length for tp_version */
	unsigned int		tp_reserve;	/* user-reserved headroom per frame */
	unsigned int		tp_loss:1;	/* PACKET_LOSS enabled */
	unsigned int		tp_tx_has_off:1;	/* PACKET_TX_HAS_OFF enabled */
	unsigned int		tp_tstamp;	/* timestamping flags */
	struct net_device __rcu	*cached_dev;	/* RCU-protected cache of the bound device */
	int			(*xmit)(struct sk_buff *skb);	/* transmit function in use */
	/* Own cacheline: hot receive-path hook, kept away from the fields above. */
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
129
/*
 * pkt_sk - convert a generic struct sock to its containing packet_sock.
 *
 * The cast is valid because struct sock is required to be the first
 * member of struct packet_sock (see the comment in that definition),
 * so both pointers refer to the same address.
 *
 * Marked inline: this definition lives in a header, and a plain static
 * function would be emitted (and warned about as unused) separately in
 * every translation unit that includes this file.
 */
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}
134
135#endif