/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PACKET_INTERNAL_H__
#define __PACKET_INTERNAL_H__

#include <linux/refcount.h>

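/*
 * One entry per multicast/promisc membership added on this socket with
 * the PACKET_ADD_MEMBERSHIP socket option; `count` tracks how many times
 * the same address was added, so memberships are only torn down once the
 * matching number of PACKET_DROP_MEMBERSHIP calls has been seen.
 */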
struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};

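/*
 * Per-ring state for the TPACKET_V3 block-based receive ring: frames are
 * packed into fixed-size blocks that are handed to user space whole, either
 * when a block fills up or when the retire timer below expires.
 */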
/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;
	char		*pkblk_end;
	int		kblk_size;
	unsigned int	max_frame_len;
	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;
	char		*prev;
	char		*nxt_offset;
	struct sk_buff	*skb;

	atomic_t	blk_fill_in_prog;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;
	unsigned short	version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};

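/* One page-vector entry: the base address of one contiguous ring block. */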
struct pgv {
	char *buffer;
};

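/*
 * One rx or tx packet ring, mmap()ed into user space. pending_refcnt counts
 * tx frames still in flight, per CPU. The union holds the TPACKET_V3
 * block-queue state (prb_bdqc) or, for a V1/V2 rx ring, a bitmap used to
 * track which frame slots the kernel currently owns (rx_owner_map).
 */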
59struct packet_ring_buffer {
60 struct pgv *pg_vec;
Daniel Borkmann0578edc2013-04-19 06:12:28 +000061
Pavel Emelyanov2787b042012-08-13 05:49:39 +000062 unsigned int head;
63 unsigned int frames_per_block;
64 unsigned int frame_size;
65 unsigned int frame_max;
66
Eric Dumazet3a7ad062018-08-29 11:50:12 -070067 unsigned int pg_vec_order;
Pavel Emelyanov2787b042012-08-13 05:49:39 +000068 unsigned int pg_vec_pages;
69 unsigned int pg_vec_len;
70
Daniel Borkmannb0138402014-01-15 16:25:36 +010071 unsigned int __percpu *pending_refcnt;
Daniel Borkmann0578edc2013-04-19 06:12:28 +000072
Willem de Bruijn6fb0e432020-03-13 12:18:09 -040073 union {
74 unsigned long *rx_owner_map;
75 struct tpacket_kbdq_core prb_bdqc;
76 };
Pavel Emelyanov2787b042012-08-13 05:49:39 +000077};
78
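/*
 * A fanout group (PACKET_FANOUT socket option) spreads received packets
 * across up to PACKET_FANOUT_MAX member sockets according to `type`
 * (hash, round-robin, CPU, random, rollover, queue mapping, or BPF).
 */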
extern struct mutex fanout_mutex;
#define PACKET_FANOUT_MAX	256

struct packet_fanout {
	possible_net_t		net;
	unsigned int		num_members;
	u16			id;
	u8			type;
	u8			flags;
	union {
		atomic_t		rr_cur;
		struct bpf_prog __rcu	*bpf_prog;
	};
	struct list_head	list;
	struct sock		*arr[PACKET_FANOUT_MAX];
	spinlock_t		lock;
	refcount_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

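/*
 * Per-socket state for PACKET_FANOUT_FLAG_ROLLOVER: `sock` remembers where
 * the last rollover search left off, the counters feed the
 * PACKET_ROLLOVER_STATS getsockopt, and `history` caches recent flow hashes
 * so that large flows can be detected and rolled over first.
 */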
struct packet_rollover {
	int			sock;
	atomic_long_t		num;
	atomic_long_t		num_huge;
	atomic_long_t		num_failed;
#define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(u32))
	u32			history[ROLLOVER_HLEN] ____cacheline_aligned;
} ____cacheline_aligned_in_smp;

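/* Protocol-private state of an AF_PACKET socket, overlaid on struct sock. */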
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	union tpacket_stats_u	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running;	/* bind_lock must be held */
	unsigned int		auxdata:1,	/* writer must hold sock lock */
				origdev:1,
				has_vnet_hdr:1,
				tp_loss:1,
				tp_tx_has_off:1;
	int			pressure;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_rollover	*rollover;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_tstamp;
	struct completion	skb_completion;
	struct net_device __rcu	*cached_dev;
	int			(*xmit)(struct sk_buff *skb);
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

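/* The cast is safe because struct sock is the first member of packet_sock. */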
static struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

#endif