#ifndef _SUNVNET_H
#define _SUNVNET_H

#include <linux/interrupt.h>

#define DESC_NCOOKIES(entry_size)	\
	((entry_size) - sizeof(struct vio_net_desc))

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define VNET_TX_TIMEOUT			(5 * HZ)
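
/* Illustrative sketch (not part of the original header): this value is
 * meant to be programmed into the netdev watchdog, alongside a recovery
 * handler in net_device_ops; example_tx_timeout() below is a
 * hypothetical name:
 *
 *	static void example_tx_timeout(struct net_device *dev)
 *	{
 *		// reset transmit state and wake the queue here
 *	}
 *
 *	dev->watchdog_timeo = VNET_TX_TIMEOUT;
 *	// with .ndo_tx_timeout = example_tx_timeout in the device's ops
 */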

/* length of time (or less) we expect pending descriptors to be marked
 * as VIO_DESC_DONE and skbs ready to be freed
 */
#define VNET_CLEAN_TIMEOUT		((HZ / 100) + 1)
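
/* Illustrative sketch (not part of the original header): each port's
 * clean_timer (see struct vnet_port below) would typically be re-armed
 * at this interval while transmitted skbs are still awaiting
 * VIO_DESC_DONE, e.g.:
 *
 *	mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
 */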

#define VNET_MAXPACKET			(65535ULL + ETH_HLEN + VLAN_HLEN)
#define VNET_TX_RING_SIZE		512
#define VNET_TX_WAKEUP_THRESH(dr)	((dr)->pending / 4)

/* VNET packets are sent in buffers with the first 6 bytes skipped
 * so that after the ethernet header the IPv4/IPv6 headers are aligned
 * properly.
 */
#define VNET_PACKET_SKIP		6
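
/* Worked out: with ETH_HLEN == 14, skipping 6 bytes puts the end of the
 * Ethernet header at byte offset 6 + 14 = 20, so the IPv4/IPv6 header
 * begins on a 4-byte boundary.  An illustrative copy into such a buffer
 * (hypothetical names) would look like:
 *
 *	memcpy(buf + VNET_PACKET_SKIP, skb->data, skb->len);
 */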

#define VNET_MAXCOOKIES			(VNET_MAXPACKET / PAGE_SIZE + 1)

struct vnet_tx_entry {
	struct sk_buff		*skb;
	unsigned int		ncookies;
	struct ldc_trans_cookie	cookies[VNET_MAXCOOKIES];
};

struct vnet;
struct vnet_port {
	struct vio_driver_state	vio;

	struct hlist_node	hash;
	u8			raddr[ETH_ALEN];
	u8			switch_port;
	u8			__pad;

	struct vnet		*vp;

	struct vnet_tx_entry	tx_bufs[VNET_TX_RING_SIZE];

	struct list_head	list;

	u32			stop_rx_idx;
	bool			stop_rx;
	bool			start_cons;

	struct timer_list	clean_timer;

	u64			rmtu;

	struct napi_struct	napi;
	u32			napi_stop_idx;
	bool			napi_resume;
	int			rx_event;
	u16			q_index;
};

static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
{
	return container_of(vio, struct vnet_port, vio);
}

#define VNET_PORT_HASH_SIZE	16
#define VNET_PORT_HASH_MASK	(VNET_PORT_HASH_SIZE - 1)

static inline unsigned int vnet_hashfn(u8 *mac)
{
	unsigned int val = mac[4] ^ mac[5];

	return val & (VNET_PORT_HASH_MASK);
}
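
/* Illustrative use (not part of the original header): a port lookup
 * would index the per-vnet hash table with the low bytes of the
 * destination MAC, e.g.:
 *
 *	struct hlist_head *hp = &vp->port_hash[vnet_hashfn(mac)];
 */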

struct vnet_mcast_entry {
	u8			addr[ETH_ALEN];
	u8			sent;
	u8			hit;
	struct vnet_mcast_entry	*next;
};

struct vnet {
	/* Protects port_list and port_hash. */
	spinlock_t		lock;

	struct net_device	*dev;

	u32			msg_enable;

	struct list_head	port_list;

	struct hlist_head	port_hash[VNET_PORT_HASH_SIZE];

	struct vnet_mcast_entry	*mcast_list;

	struct list_head	list;
	u64			local_mac;

	int			nports;
};

#endif /* _SUNVNET_H */