/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PACKET_INTERNAL_H__
#define __PACKET_INTERNAL_H__

#include <linux/refcount.h>
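
/*
 * One multicast/promiscuous membership held on the socket's behalf, as
 * requested via the PACKET_ADD_MEMBERSHIP socket option.  Entries are
 * chained per socket; count tracks repeated joins of the same address so
 * that PACKET_DROP_MEMBERSHIP releases the device state only on the
 * last drop.
 */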
struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};

/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;
	char		*pkblk_end;
	int		kblk_size;
	unsigned int	max_frame_len;
	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;
	char		*prev;
	char		*nxt_offset;
	struct sk_buff	*skb;

	atomic_t	blk_fill_in_prog;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;
	unsigned short	version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};
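
/*
 * The kbdq drives the TPACKET_V3 receive ring: packets are packed back
 * to back into fixed-size blocks, and a block is handed over to user
 * space ("retired") either when it fills up or when retire_blk_timer
 * fires, so a quiet link still delivers buffered packets with bounded
 * latency.  retire_blk_tov is the user-supplied timeout in milliseconds;
 * when the user passes zero, the kernel picks one itself, based on link
 * speed where it can and DEFAULT_PRB_RETIRE_TOV otherwise.
 */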

struct pgv {
	char *buffer;
};
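
/*
 * The ring is not one monolithic allocation: pg_vec below is an array of
 * pgv chunks, each physically contiguous and holding a whole number of
 * frames (or exactly one TPACKET_V3 block).
 */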

struct packet_ring_buffer {
	struct pgv		*pg_vec;

	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	unsigned int __percpu	*pending_refcnt;

	struct tpacket_kbdq_core	prb_bdqc;
};
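
/*
 * How such a ring typically comes to exist, seen from user space (a
 * minimal sketch with error handling omitted; block and frame sizes are
 * illustrative, not mandated, and tp_retire_blk_tov is in milliseconds):
 *
 *	struct tpacket_req3 req = {
 *		.tp_block_size		= 1 << 22,
 *		.tp_block_nr		= 64,
 *		.tp_frame_size		= 2048,
 *		.tp_frame_nr		= 64 * ((1 << 22) / 2048),
 *		.tp_retire_blk_tov	= 60,
 *	};
 *	int version = TPACKET_V3;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * packet_set_ring() turns this into the pg_vec above and rejects
 * requests where tp_frame_nr != tp_block_nr * frames_per_block.
 */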

extern struct mutex fanout_mutex;
#define PACKET_FANOUT_MAX	256

struct packet_fanout {
	possible_net_t		net;
	unsigned int		num_members;
	u16			id;
	u8			type;
	u8			flags;
	union {
		atomic_t		rr_cur;
		struct bpf_prog __rcu	*bpf_prog;
	};
	struct list_head	list;
	struct sock		*arr[PACKET_FANOUT_MAX];
	spinlock_t		lock;
	refcount_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
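
/*
 * A fanout group spreads one capture point across up to
 * PACKET_FANOUT_MAX member sockets; type selects the demux policy (flow
 * hash, round robin, CPU, ...) and the union holds per-policy state:
 * rr_cur for round robin, bpf_prog for the classic/extended BPF modes.
 * Joining a group from user space is a single setsockopt (sketch,
 * assuming the usual <linux/if_packet.h> definitions):
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int arg = group_id | (PACKET_FANOUT_HASH << 16);
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */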
| 95 | |
Willem de Bruijn | 0648ab7 | 2015-05-12 11:56:46 -0400 | [diff] [blame] | 96 | struct packet_rollover { |
| 97 | int sock; |
Willem de Bruijn | a9b6391 | 2015-05-12 11:56:50 -0400 | [diff] [blame] | 98 | atomic_long_t num; |
| 99 | atomic_long_t num_huge; |
| 100 | atomic_long_t num_failed; |
Willem de Bruijn | 3b3a5b0 | 2015-05-12 11:56:49 -0400 | [diff] [blame] | 101 | #define ROLLOVER_HLEN (L1_CACHE_BYTES / sizeof(u32)) |
| 102 | u32 history[ROLLOVER_HLEN] ____cacheline_aligned; |
Willem de Bruijn | 0648ab7 | 2015-05-12 11:56:46 -0400 | [diff] [blame] | 103 | } ____cacheline_aligned_in_smp; |
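
/*
 * Per-socket rollover state: with PACKET_FANOUT_FLAG_ROLLOVER, a packet
 * that would land on a congested group member is diverted to another
 * member with room instead of being dropped.  num, num_huge and
 * num_failed feed the PACKET_ROLLOVER_STATS getsockopt.
 */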

struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	union tpacket_stats_u	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running;	/* bind_lock must be held */
	unsigned int		auxdata:1,	/* writer must hold sock lock */
				origdev:1,
				has_vnet_hdr:1,
				tp_loss:1,
				tp_tx_has_off:1;
	int			pressure;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_rollover	*rollover;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_tstamp;
	struct net_device __rcu	*cached_dev;
	int			(*xmit)(struct sk_buff *skb);
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
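
/*
 * pkt_sk() relies on struct sock being the first member of struct
 * packet_sock (see the comment in the struct above), the usual pattern
 * for recovering a protocol-private socket from its embedded struct sock.
 */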
static struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

#endif