#ifndef _SUNVNET_H
#define _SUNVNET_H

#include <linux/interrupt.h>

#define DESC_NCOOKIES(entry_size)	\
	((entry_size) - sizeof(struct vio_net_desc))

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define VNET_TX_TIMEOUT			(5 * HZ)

/* length of time (or less) we expect pending descriptors to be marked
 * as VIO_DESC_DONE and skbs ready to be freed
 */
#define	VNET_CLEAN_TIMEOUT		((HZ/100)+1)
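/* Note: (HZ/100) jiffies is 10 ms, so this works out to roughly 10 ms plus
 * one jiffy of slack; presumably it is used to arm the per-port clean_timer
 * declared below (inferred from the names, not stated in this header).
 */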

#define VNET_MAXPACKET			(65535ULL + ETH_HLEN + VLAN_HLEN)
#define VNET_TX_RING_SIZE		512
#define VNET_TX_WAKEUP_THRESH(dr)	((dr)->pending / 4)
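/* The wakeup threshold is a quarter of the ring's pending descriptor count;
 * presumably the driver stops the tx queue when the ring fills and re-wakes
 * it once at least this many descriptors have been reclaimed (inferred from
 * the macro's name and definition, not stated in this header).
 */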

/* VNET packets are sent in buffers with the first 6 bytes skipped
 * so that after the ethernet header the IPv4/IPv6 headers are aligned
 * properly.
 */
#define VNET_PACKET_SKIP		6
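/* With the standard 14-byte Ethernet header (ETH_HLEN), skipping 6 bytes
 * puts the L3 header at offset 6 + 14 = 20 into the buffer, a 4-byte
 * boundary, which is the alignment the comment above refers to.
 */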

#define	VNET_MAXCOOKIES			(VNET_MAXPACKET/PAGE_SIZE + 1)
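/* A buffer of up to VNET_MAXPACKET bytes can span at most
 * VNET_MAXPACKET/PAGE_SIZE + 1 pages; assuming one LDC transfer cookie per
 * page in the worst case, this bounds the cookies[] array in vnet_tx_entry
 * below.
 */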

struct vnet_tx_entry {
	struct sk_buff		*skb;
	unsigned int		ncookies;
	struct ldc_trans_cookie	cookies[VNET_MAXCOOKIES];
};

struct vnet;
struct vnet_port {
	struct vio_driver_state	vio;

	struct hlist_node	hash;
	u8			raddr[ETH_ALEN];
	u8			switch_port;
	u8			__pad;

	struct vnet		*vp;

	struct vnet_tx_entry	tx_bufs[VNET_TX_RING_SIZE];

	struct list_head	list;

	u32			stop_rx_idx;
	bool			stop_rx;
	bool			start_cons;

	struct timer_list	clean_timer;

	u64			rmtu;

	struct napi_struct	napi;
	u32			napi_stop_idx;
	bool			napi_resume;
	int			rx_event;
};

static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
{
	return container_of(vio, struct vnet_port, vio);
}

#define VNET_PORT_HASH_SIZE	16
#define VNET_PORT_HASH_MASK	(VNET_PORT_HASH_SIZE - 1)

static inline unsigned int vnet_hashfn(u8 *mac)
{
	unsigned int val = mac[4] ^ mac[5];

	return val & (VNET_PORT_HASH_MASK);
}
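/* vnet_hashfn() mixes only the last two bytes of the MAC address and masks
 * the result down to VNET_PORT_HASH_SIZE buckets.  Illustrative sketch (not
 * part of this header) of how a transmit-side port lookup might use it,
 * hashing the destination MAC at the start of the frame:
 *
 *	struct hlist_head *hp = &vp->port_hash[vnet_hashfn(skb->data)];
 *	struct vnet_port *port;
 *
 *	hlist_for_each_entry(port, hp, hash)
 *		if (ether_addr_equal(port->raddr, skb->data))
 *			return port;
 *
 * The actual lookup code lives in the driver proper (sunvnet.c), not here.
 */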

struct vnet_mcast_entry {
	u8			addr[ETH_ALEN];
	u8			sent;
	u8			hit;
	struct vnet_mcast_entry	*next;
};

struct vnet {
	/* Protects port_list and port_hash.  */
	spinlock_t		lock;

	struct net_device	*dev;

	u32			msg_enable;

	struct list_head	port_list;

	struct hlist_head	port_hash[VNET_PORT_HASH_SIZE];

	struct vnet_mcast_entry	*mcast_list;

	struct list_head	list;
	u64			local_mac;

};

#endif /* _SUNVNET_H */