blob: eb9580a6b25ff4474a7af54900efbe2579325d00 [file] [log] [blame]
#ifndef __PACKET_INTERNAL_H__
#define __PACKET_INTERNAL_H__
3
4struct packet_mclist {
5 struct packet_mclist *next;
6 int ifindex;
7 int count;
8 unsigned short type;
9 unsigned short alen;
10 unsigned char addr[MAX_ADDR_LEN];
11};
12
13/* kbdq - kernel block descriptor queue */
14struct tpacket_kbdq_core {
15 struct pgv *pkbdq;
16 unsigned int feature_req_word;
17 unsigned int hdrlen;
18 unsigned char reset_pending_on_curr_blk;
19 unsigned char delete_blk_timer;
20 unsigned short kactive_blk_num;
21 unsigned short blk_sizeof_priv;
22
23 /* last_kactive_blk_num:
24 * trick to see if user-space has caught up
25 * in order to avoid refreshing timer when every single pkt arrives.
26 */
27 unsigned short last_kactive_blk_num;
28
29 char *pkblk_start;
30 char *pkblk_end;
31 int kblk_size;
32 unsigned int knum_blocks;
33 uint64_t knxt_seq_num;
34 char *prev;
35 char *nxt_offset;
36 struct sk_buff *skb;
37
38 atomic_t blk_fill_in_prog;
39
40 /* Default is set to 8ms */
41#define DEFAULT_PRB_RETIRE_TOV (8)
42
43 unsigned short retire_blk_tov;
44 unsigned short version;
45 unsigned long tov_in_jiffies;
46
47 /* timer to retire an outstanding block */
48 struct timer_list retire_blk_timer;
49};
50
/* One element of a ring's page vector: a pointer to one buffer/block. */
struct pgv {
	char *buffer;
};
54
55struct packet_ring_buffer {
56 struct pgv *pg_vec;
Daniel Borkmann0578edc2013-04-19 06:12:28 +000057
Pavel Emelyanov2787b042012-08-13 05:49:39 +000058 unsigned int head;
59 unsigned int frames_per_block;
60 unsigned int frame_size;
61 unsigned int frame_max;
62
63 unsigned int pg_vec_order;
64 unsigned int pg_vec_pages;
65 unsigned int pg_vec_len;
66
Daniel Borkmannb0138402014-01-15 16:25:36 +010067 unsigned int __percpu *pending_refcnt;
Daniel Borkmann0578edc2013-04-19 06:12:28 +000068
69 struct tpacket_kbdq_core prb_bdqc;
Pavel Emelyanov2787b042012-08-13 05:49:39 +000070};
71
extern struct mutex fanout_mutex;
#define PACKET_FANOUT_MAX	256
74
75struct packet_fanout {
76#ifdef CONFIG_NET_NS
77 struct net *net;
78#endif
79 unsigned int num_members;
80 u16 id;
81 u8 type;
Willem de Bruijn77f65eb2013-03-19 10:18:11 +000082 u8 flags;
Pavel Emelyanovfff33212012-08-16 05:36:48 +000083 atomic_t rr_cur;
84 struct list_head list;
85 struct sock *arr[PACKET_FANOUT_MAX];
Willem de Bruijn77f65eb2013-03-19 10:18:11 +000086 int next[PACKET_FANOUT_MAX];
Pavel Emelyanovfff33212012-08-16 05:36:48 +000087 spinlock_t lock;
88 atomic_t sk_ref;
89 struct packet_type prot_hook ____cacheline_aligned_in_smp;
90};
91
Pavel Emelyanov2787b042012-08-13 05:49:39 +000092struct packet_sock {
93 /* struct sock has to be the first member of packet_sock */
94 struct sock sk;
95 struct packet_fanout *fanout;
Daniel Borkmannee80fbf2013-04-19 06:12:29 +000096 union tpacket_stats_u stats;
Pavel Emelyanov2787b042012-08-13 05:49:39 +000097 struct packet_ring_buffer rx_ring;
98 struct packet_ring_buffer tx_ring;
99 int copy_thresh;
100 spinlock_t bind_lock;
101 struct mutex pg_vec_lock;
102 unsigned int running:1, /* prot_hook is attached*/
103 auxdata:1,
104 origdev:1,
105 has_vnet_hdr:1;
106 int ifindex; /* bound device */
107 __be16 num;
108 struct packet_mclist *mclist;
109 atomic_t mapped;
110 enum tpacket_versions tp_version;
111 unsigned int tp_hdrlen;
112 unsigned int tp_reserve;
113 unsigned int tp_loss:1;
Paul Chavent5920cd3a2012-11-06 23:10:47 +0000114 unsigned int tp_tx_has_off:1;
Pavel Emelyanov2787b042012-08-13 05:49:39 +0000115 unsigned int tp_tstamp;
Daniel Borkmanne40526c2013-11-21 16:50:58 +0100116 struct net_device __rcu *cached_dev;
Daniel Borkmannd346a3f2013-12-06 11:36:17 +0100117 int (*xmit)(struct sk_buff *skb);
Pavel Emelyanov2787b042012-08-13 05:49:39 +0000118 struct packet_type prot_hook ____cacheline_aligned_in_smp;
119};
120
/*
 * pkt_sk - upcast a struct sock to its enclosing packet_sock.
 *
 * Valid only because struct sock is the first member of struct
 * packet_sock (see the comment on that struct), so both pointers
 * share the same address.
 *
 * Marked static inline: this definition lives in a header, and a plain
 * "static" (non-inline) function would draw -Wunused-function warnings
 * in every translation unit that includes the header without calling it.
 */
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}
125
#endif