#ifndef __PACKET_INTERNAL_H__
#define __PACKET_INTERNAL_H__

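/* One multicast/promiscuous membership added on a device via
 * setsockopt(PACKET_ADD_MEMBERSHIP); duplicate requests for the same
 * address bump @count instead of adding a new entry.
 */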
struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};

/* kbdq - kernel block descriptor queue */
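/* Per-ring state for the TPACKET_V3 block machinery: frames are packed
 * into fixed-size blocks that are handed over ("retired") to user space
 * either when a block fills up or when the retire timer expires, so that
 * slow traffic is not delayed indefinitely.
 */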
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;
	char		*pkblk_end;
	int		kblk_size;
	unsigned int	max_frame_len;
	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;
	char		*prev;
	char		*nxt_offset;
	struct sk_buff	*skb;

	atomic_t	blk_fill_in_prog;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;
	unsigned short	version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};

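/* One contiguous chunk of the mmap()ed ring; the ring itself is a
 * vector of these.
 */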
struct pgv {
	char *buffer;
};

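/* An rx or tx ring, mmap()ed into user space. Progress is tracked by
 * @head together with the per-frame status words stored in the ring
 * itself; @pending_refcnt counts tx frames still in flight, per CPU.
 */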
struct packet_ring_buffer {
	struct pgv		*pg_vec;

	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	unsigned int __percpu	*pending_refcnt;

	struct tpacket_kbdq_core	prb_bdqc;
};

extern struct mutex fanout_mutex;
#define PACKET_FANOUT_MAX	256

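/* A fanout group: up to PACKET_FANOUT_MAX sockets sharing one @id within
 * a net namespace. Each incoming packet is steered to exactly one member
 * according to @type (hash, round-robin, CPU, rollover, random, queue
 * mapping, or a cBPF/eBPF program).
 */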
struct packet_fanout {
	possible_net_t		net;
	unsigned int		num_members;
	u16			id;
	u8			type;
	u8			flags;
	union {
		atomic_t		rr_cur;
		struct bpf_prog __rcu	*bpf_prog;
	};
	struct list_head	list;
	struct sock		*arr[PACKET_FANOUT_MAX];
	spinlock_t		lock;
	atomic_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

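/* Rollover state for PACKET_FANOUT_FLAG_ROLLOVER: @sock remembers the
 * member to try first on the next rollover, the atomic counters feed the
 * PACKET_ROLLOVER_STATS getsockopt, and @history caches recent flow
 * hashes to detect "huge" flows.
 */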
struct packet_rollover {
	int			sock;
	atomic_long_t		num;
	atomic_long_t		num_huge;
	atomic_long_t		num_failed;
#define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(u32))
	u32			history[ROLLOVER_HLEN] ____cacheline_aligned;
} ____cacheline_aligned_in_smp;

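/* The AF_PACKET socket itself, layered over struct sock so that the
 * pkt_sk() cast below is valid.
 */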
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	union tpacket_stats_u	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running;	/* bind_lock must be held */
	unsigned int		auxdata:1,	/* writer must hold sock lock */
				origdev:1,
				has_vnet_hdr:1,
				tp_loss:1,
				tp_tx_has_off:1;
	int			pressure;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_rollover	*rollover;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_tstamp;
	struct completion	skb_completion;
	struct net_device __rcu	*cached_dev;
	int			(*xmit)(struct sk_buff *skb);
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

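/* Safe because struct sock is the first member of struct packet_sock. */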
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

#endif