blob: 3bb7c5fb3bff2fd5d91c3d973d006d0cdde29a0b [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Pavel Emelyanov2787b042012-08-13 05:49:39 +00002#ifndef __PACKET_INTERNAL_H__
3#define __PACKET_INTERNAL_H__
4
Reshetova, Elenafb5c2c12017-06-30 13:08:10 +03005#include <linux/refcount.h>
6
/* One multicast/hardware-address membership attached to a packet socket.
 * Entries form a simple singly linked list (see *next); the address is a
 * raw link-layer address of up to MAX_ADDR_LEN bytes.
 */
struct packet_mclist {
	struct packet_mclist	*next;		/* next entry in the socket's list */
	int			ifindex;	/* interface the membership applies to */
	int			count;		/* NOTE(review): presumably a refcount of
						 * identical add requests — confirm in af_packet.c */
	unsigned short		type;		/* hardware address type */
	unsigned short		alen;		/* significant length of addr[] */
	unsigned char		addr[MAX_ADDR_LEN];	/* link-layer address bytes */
};
15
/* kbdq - kernel block descriptor queue
 *
 * Per-ring bookkeeping for the block-oriented (TPACKET_V3-style) packet
 * ring: tracks the block currently being filled, its boundaries, and the
 * timer that retires a partially filled block after a timeout.
 * NOTE(review): field semantics below are inferred from names and the
 * pre-existing comments — confirm against the prb_* helpers in af_packet.c.
 */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;			/* page vector backing the blocks */
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;	/* block currently being filled */
	unsigned short	blk_sizeof_priv;	/* per-block private area size */

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;		/* start of the current block */
	char		*pkblk_end;		/* end of the current block */
	int		kblk_size;		/* size of one block */
	unsigned int	max_frame_len;
	unsigned int	knum_blocks;		/* number of blocks in the ring */
	uint64_t	knxt_seq_num;		/* next sequence number to assign */
	char		*prev;
	char		*nxt_offset;		/* where the next frame is written */
	struct sk_buff	*skb;

	atomic_t	blk_fill_in_prog;	/* writers currently filling a block */

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short  retire_blk_tov;		/* block retire timeout (ms) */
	unsigned short  version;
	unsigned long	tov_in_jiffies;		/* retire_blk_tov converted to jiffies */

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};
54
/* One element of a page vector: points at a contiguous buffer that holds
 * part of a packet ring.
 */
struct pgv {
	char *buffer;
};
58
/* A packet ring (used for both rx_ring and tx_ring in struct packet_sock).
 * The ring memory is a vector of buffers (pg_vec) subdivided into fixed-size
 * frames; prb_bdqc carries the extra state needed by the block-based variant.
 */
struct packet_ring_buffer {
	struct pgv		*pg_vec;	/* vector of backing buffers */

	unsigned int		head;		/* current frame position */
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;	/* highest valid frame index */

	unsigned int		pg_vec_order;	/* page allocation order per buffer */
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;	/* number of entries in pg_vec */

	/* per-CPU count of in-flight frames (TX completion accounting) */
	unsigned int __percpu	*pending_refcnt;

	struct tpacket_kbdq_core	prb_bdqc;	/* block-descriptor queue state */
};
75
/* NOTE(review): presumably serializes fanout group creation/teardown and
 * the global fanout list — confirm against fanout_add()/fanout_release().
 */
extern struct mutex fanout_mutex;
/* Upper bound on sockets per fanout group (size of packet_fanout::arr). */
#define PACKET_FANOUT_MAX	256
78
/* A fanout group: a shared protocol hook whose received packets are
 * distributed among up to PACKET_FANOUT_MAX member sockets.  The union
 * holds mode-specific demux state (round-robin cursor vs. BPF selector).
 */
struct packet_fanout {
	possible_net_t		net;		/* owning network namespace */
	unsigned int		num_members;	/* live entries in arr[] */
	u16			id;		/* group id */
	u8			type;		/* fanout mode (PACKET_FANOUT_*) */
	u8			flags;
	union {
		atomic_t		rr_cur;		/* round-robin cursor */
		struct bpf_prog __rcu	*bpf_prog;	/* selector for BPF-based modes */
	};
	struct list_head	list;		/* linkage in the global fanout list */
	struct sock		*arr[PACKET_FANOUT_MAX];	/* member sockets */
	spinlock_t		lock;
	refcount_t		sk_ref;		/* references held by member sockets */
	/* own cache line: the hot receive path hammers this hook */
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
95
/* Per-socket rollover state: counters for rollover events and a small
 * cache-line-sized history used when choosing a rollover target.
 */
struct packet_rollover {
	int			sock;		/* last socket rolled over to */
	atomic_long_t		num;		/* total rollovers */
	atomic_long_t		num_huge;	/* NOTE(review): presumably rollovers of
						 * oversized packets — confirm */
	atomic_long_t		num_failed;	/* rollover attempts that found no room */
#define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(u32))
	u32			history[ROLLOVER_HLEN] ____cacheline_aligned;
} ____cacheline_aligned_in_smp;
104
/* Protocol-private state of an AF_PACKET socket.
 * struct sock must stay the first member so that pkt_sk() can convert a
 * struct sock pointer with a plain cast.
 */
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;	/* fanout group, if joined */
	union  tpacket_stats_u	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;	/* protects ring (re)configuration */
	unsigned int		running;	/* bind_lock must be held */
	unsigned int		auxdata:1,	/* writer must hold sock lock */
				origdev:1,
				has_vnet_hdr:1,
				tp_loss:1,
				tp_tx_has_off:1;
	int			pressure;	/* NOTE(review): looks like a ring-congestion
						 * hint used for rollover — confirm */
	int			ifindex;	/* bound device */
	__be16			num;		/* bound protocol number (network order) */
	struct packet_rollover	*rollover;
	struct packet_mclist	*mclist;	/* multicast memberships */
	atomic_t		mapped;		/* NOTE(review): presumably counts active
						 * mmap()ings of the rings — confirm */
	enum tpacket_versions	tp_version;	/* TPACKET_V1/V2/V3 ring format */
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_tstamp;	/* requested timestamping flags */
	struct net_device __rcu	*cached_dev;	/* cached bound device (RCU) */
	int			(*xmit)(struct sk_buff *skb);	/* transmit hook */
	/* own cache line: touched on every received packet */
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
135
/* Convert a generic struct sock pointer to its packet_sock container.
 *
 * The cast is valid because struct sock is the first member of
 * struct packet_sock (see the comment in the struct definition), so both
 * pointers refer to the same address.
 *
 * Marked inline: a plain static function defined in a header produces an
 * unused private copy (and -Wunused-function warnings) in every translation
 * unit that includes the header without calling it.
 */
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}
140
141#endif