#ifndef __PACKET_INTERNAL_H__
#define __PACKET_INTERNAL_H__

#include <linux/refcount.h>

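/*
 * One multicast/promiscuous membership taken on a device through the
 * PACKET_ADD_MEMBERSHIP socket option; entries are chained off
 * packet_sock->mclist and released on PACKET_DROP_MEMBERSHIP or
 * socket close.
 */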
struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};

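/*
 * Per-ring state for the TPACKET_V3 block-based receive ring: frames are
 * packed into fixed-size blocks that are released to user space either
 * when they fill up or when the block retire timer below fires.
 */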
/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;
	char		*pkblk_end;
	int		kblk_size;
	unsigned int	max_frame_len;
	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;
	char		*prev;
	char		*nxt_offset;
	struct sk_buff	*skb;

	atomic_t	blk_fill_in_prog;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;
	unsigned short	version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};

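/* One entry of a ring's page vector; buffer points at the start of the
 * block of pages backing that slot.
 */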
struct pgv {
	char *buffer;
};

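/*
 * One mmap()ed ring (rx or tx): the page vector backing it, the frame
 * geometry and accounting, a per-cpu count of tx frames still in flight,
 * and the TPACKET_V3 block descriptor queue state (prb_bdqc).
 */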
struct packet_ring_buffer {
	struct pgv		*pg_vec;

	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	unsigned int __percpu	*pending_refcnt;

	struct tpacket_kbdq_core	prb_bdqc;
};

extern struct mutex fanout_mutex;
#define PACKET_FANOUT_MAX	256

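/*
 * A fanout group: up to PACKET_FANOUT_MAX sockets sharing one group id,
 * among which incoming packets are spread. How a packet is steered to a
 * member of arr[] depends on the group type (hash, round-robin, cpu, ...);
 * for the eBPF/cBPF types the program in bpf_prog picks the member.
 */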
struct packet_fanout {
	possible_net_t		net;
	unsigned int		num_members;
	u16			id;
	u8			type;
	u8			flags;
	union {
		atomic_t		rr_cur;
		struct bpf_prog __rcu	*bpf_prog;
	};
	struct list_head	list;
	struct sock		*arr[PACKET_FANOUT_MAX];
	spinlock_t		lock;
	refcount_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

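/*
 * Rollover state and statistics: sock caches the member to try first on
 * the next rollover, the atomic counters track plain, huge-flow and failed
 * rollover events, and history holds recent flow hashes used to spot
 * dominant ("huge") flows.
 */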
struct packet_rollover {
	int			sock;
	atomic_long_t		num;
	atomic_long_t		num_huge;
	atomic_long_t		num_failed;
#define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(u32))
	u32			history[ROLLOVER_HLEN] ____cacheline_aligned;
} ____cacheline_aligned_in_smp;

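/*
 * Per-socket state of an AF_PACKET socket: optional rx/tx rings, fanout
 * membership, multicast list, and the protocol hook through which the
 * socket receives packets from the device layer.
 */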
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	union tpacket_stats_u	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running;	/* bind_lock must be held */
	unsigned int		auxdata:1,	/* writer must hold sock lock */
				origdev:1,
				has_vnet_hdr:1,
				tp_loss:1,
				tp_tx_has_off:1;
	int			pressure;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_rollover	*rollover;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_tstamp;
	struct completion	skb_completion;
	struct net_device __rcu	*cached_dev;
	int			(*xmit)(struct sk_buff *skb);
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

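/* Valid only because sk is the first member of struct packet_sock. */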
static struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

#endif