/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#ifndef _VMXNET3_INT_H
#define _VMXNET3_INT_H

#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/ioport.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/page.h>

#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <asm/checksum.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/log2.h>

#include "vmxnet3_defs.h"

#ifdef DEBUG
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI(debug)"
#else
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI"
#endif


/*
 * Version numbers
 */
#define VMXNET3_DRIVER_VERSION_STRING   "1.1.18.0-k"

/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
#define VMXNET3_DRIVER_VERSION_NUM      0x01011200
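/* e.g. 0x01011200 encodes driver version 1.1.18.0, one version field per byte */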

#if defined(CONFIG_PCI_MSI)
	/* RSS only makes sense if MSI-X is supported. */
	#define VMXNET3_RSS
#endif

/*
 * Capabilities
 */

enum {
	VMNET_CAP_SG            = 0x0001, /* Can do scatter-gather transmits. */
	VMNET_CAP_IP4_CSUM      = 0x0002, /* Can checksum only TCP/UDP over
					   * IPv4 */
	VMNET_CAP_HW_CSUM       = 0x0004, /* Can checksum all packets. */
	VMNET_CAP_HIGH_DMA      = 0x0008, /* Can DMA to high memory. */
	VMNET_CAP_TOE           = 0x0010, /* Supports TCP/IP offload. */
	VMNET_CAP_TSO           = 0x0020, /* Supports TCP Segmentation
					   * offload */
	VMNET_CAP_SW_TSO        = 0x0040, /* Supports SW TCP Segmentation */
	VMNET_CAP_VMXNET_APROM  = 0x0080, /* Vmxnet APROM support */
	VMNET_CAP_HW_TX_VLAN    = 0x0100, /* Can we do VLAN tagging in HW */
	VMNET_CAP_HW_RX_VLAN    = 0x0200, /* Can we do VLAN untagging in HW */
	VMNET_CAP_SW_VLAN       = 0x0400, /* VLAN tagging/untagging in SW */
	VMNET_CAP_WAKE_PCKT_RCV = 0x0800, /* Can wake on network packet recv? */
	VMNET_CAP_ENABLE_INT_INLINE = 0x1000,  /* Enable Interrupt Inline */
	VMNET_CAP_ENABLE_HEADER_COPY = 0x2000,  /* copy header for vmkernel */
	VMNET_CAP_TX_CHAIN      = 0x4000, /* Guest can use multiple tx entries
					   * for a pkt */
	VMNET_CAP_RX_CHAIN      = 0x8000, /* pkt can span multiple rx entries */
	VMNET_CAP_LPD           = 0x10000, /* large pkt delivery */
	VMNET_CAP_BPF           = 0x20000, /* BPF Support in VMXNET Virtual HW */
	VMNET_CAP_SG_SPAN_PAGES = 0x40000, /* Scatter-gather can span
					    * multiple pages per transmit */
	VMNET_CAP_IP6_CSUM      = 0x80000,  /* Can do IPv6 csum offload. */
	VMNET_CAP_TSO6          = 0x100000, /* TSO seg. offload for IPv6 pkts. */
	VMNET_CAP_TSO256k       = 0x200000, /* Can do TSO seg offload for
					     * pkts up to 256kB. */
	VMNET_CAP_UPT           = 0x400000  /* Support UPT */
};

/*
 * PCI vendor and device IDs.
 */
#define PCI_VENDOR_ID_VMWARE            0x15AD
#define PCI_DEVICE_ID_VMWARE_VMXNET3    0x07B0
#define MAX_ETHERNET_CARDS		10
#define MAX_PCI_PASSTHRU_DEVICE		6

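/*
 * Tx/rx "command" (fill) ring. next2fill is the next descriptor the driver
 * will populate and next2comp the next one it expects the device to complete;
 * 'gen' is the generation bit written into each descriptor so the device can
 * tell freshly filled descriptors from stale ones after the ring wraps.
 */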
struct vmxnet3_cmd_ring {
	union Vmxnet3_GenericDesc *base;
	u32		size;
	u32		next2fill;
	u32		next2comp;
	u8		gen;
	dma_addr_t	basePA;
};

static inline void
vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
{
	ring->next2fill++;
	if (unlikely(ring->next2fill == ring->size)) {
		ring->next2fill = 0;
		VMXNET3_FLIP_RING_GEN(ring->gen);
	}
}

static inline void
vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
{
	VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
}

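/*
 * Descriptors still available to the driver. One slot is intentionally left
 * unused so that next2fill == next2comp unambiguously means "ring empty"
 * rather than "ring full", hence the trailing "- 1".
 */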
static inline int
vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
{
	return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
		ring->next2comp - ring->next2fill - 1;
}

struct vmxnet3_comp_ring {
	union Vmxnet3_GenericDesc *base;
	u32		size;
	u32		next2proc;
	u8		gen;
	u8		intr_idx;
	dma_addr_t	basePA;
};

static inline void
vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
{
	ring->next2proc++;
	if (unlikely(ring->next2proc == ring->size)) {
		ring->next2proc = 0;
		VMXNET3_FLIP_RING_GEN(ring->gen);
	}
}

struct vmxnet3_tx_data_ring {
	struct Vmxnet3_TxDataDesc *base;
	u32		size;
	dma_addr_t	basePA;
};

enum vmxnet3_buf_map_type {
	VMXNET3_MAP_INVALID = 0,
	VMXNET3_MAP_NONE,
	VMXNET3_MAP_SINGLE,
	VMXNET3_MAP_PAGE,
};

struct vmxnet3_tx_buf_info {
	u32		map_type;
	u16		len;
	u16		sop_idx;
	dma_addr_t	dma_addr;
	struct sk_buff	*skb;
};

struct vmxnet3_tq_driver_stats {
	u64 drop_total;     /* # of pkts dropped by the driver; the
			     * counters below break the total down
			     * by drop reason
			     */
	u64 drop_too_many_frags;
	u64 drop_oversized_hdr;
	u64 drop_hdr_inspect_err;
	u64 drop_tso;

	u64 tx_ring_full;
	u64 linearized;         /* # of pkts linearized */
	u64 copy_skb_header;    /* # of times we have to copy skb header */
	u64 oversized_hdr;
};

struct vmxnet3_tx_ctx {
	bool	ipv4;
	u16	mss;
	u32	eth_ip_hdr_size; /* only valid for pkts requesting tso or csum
				  * offloading
				  */
	u32	l4_hdr_size;     /* only valid if mss != 0 */
	u32	copy_size;       /* # of bytes copied into the data ring */
	union Vmxnet3_GenericDesc *sop_txd;
	union Vmxnet3_GenericDesc *eop_txd;
};

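/*
 * Per-queue tx state: a command ring of tx descriptors, a data ring holding
 * driver-copied packet headers (see copy_size above), and a completion ring
 * the device writes back into.
 */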
struct vmxnet3_tx_queue {
	char			name[IFNAMSIZ+8]; /* To identify interrupt */
	struct vmxnet3_adapter		*adapter;
	spinlock_t			tx_lock;
	struct vmxnet3_cmd_ring		tx_ring;
	struct vmxnet3_tx_buf_info	*buf_info;
	struct vmxnet3_tx_data_ring	data_ring;
	struct vmxnet3_comp_ring	comp_ring;
	struct Vmxnet3_TxQueueCtrl	*shared;
	struct vmxnet3_tq_driver_stats	stats;
	bool				stopped;
	int				num_stop;  /* # of times the queue is
						    * stopped */
	int				qid;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

enum vmxnet3_rx_buf_type {
	VMXNET3_RX_BUF_NONE = 0,
	VMXNET3_RX_BUF_SKB = 1,
	VMXNET3_RX_BUF_PAGE = 2
};

struct vmxnet3_rx_buf_info {
	enum vmxnet3_rx_buf_type buf_type;
	u16	len;
	union {
		struct sk_buff *skb;
		struct page    *page;
	};
	dma_addr_t dma_addr;
};

struct vmxnet3_rx_ctx {
	struct sk_buff *skb;
	u32 sop_idx;
};

struct vmxnet3_rq_driver_stats {
	u64 drop_total;
	u64 drop_err;
	u64 drop_fcs;
	u64 rx_buf_alloc_failure;
};

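/*
 * Per-queue rx state: two fill rings feed buffers to the device (ring 0
 * normally holds skb buffers, ring 1 page buffers for larger packets) and a
 * completion ring reports received packets back to the driver.
 */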
struct vmxnet3_rx_queue {
	char			name[IFNAMSIZ + 8]; /* To identify interrupt */
	struct vmxnet3_adapter	  *adapter;
	struct napi_struct        napi;
	struct vmxnet3_cmd_ring   rx_ring[2];
	struct vmxnet3_comp_ring  comp_ring;
	struct vmxnet3_rx_ctx     rx_ctx;
	u32 qid;            /* rqID in RCD for buffer from 1st ring */
	u32 qid2;           /* rqID in RCD for buffer from 2nd ring */
	u32 uncommitted[2]; /* # of buffers allocated since last RXPROD
			     * update */
	struct vmxnet3_rx_buf_info     *buf_info[2];
	struct Vmxnet3_RxQueueCtrl     *shared;
	struct vmxnet3_rq_driver_stats  stats;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define VMXNET3_DEVICE_MAX_TX_QUEUES 8
#define VMXNET3_DEVICE_MAX_RX_QUEUES 8   /* Keep this value as a power of 2 */

/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
#define VMXNET3_RSS_IND_TABLE_SIZE  (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)
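/* i.e. 32 indirection table entries with the current 8 rx queue maximum */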

#define VMXNET3_LINUX_MAX_MSIX_VECT     (VMXNET3_DEVICE_MAX_TX_QUEUES + \
					 VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
#define VMXNET3_LINUX_MIN_MSIX_VECT     2 /* 1 for tx-rx pair and 1 for event */


struct vmxnet3_intr {
	enum vmxnet3_intr_mask_mode  mask_mode;
	enum vmxnet3_intr_type       type;	/* MSI-X, MSI, or INTx? */
	u8  num_intrs;			/* # of intr vectors */
	u8  event_intr_idx;		/* idx of the intr vector for event */
	u8  mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
	char	event_msi_vector_name[IFNAMSIZ+11];
#ifdef CONFIG_PCI_MSI
	struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
#endif
};

/* Interrupt sharing schemes, share_intr */
#define VMXNET3_INTR_BUDDYSHARE    0    /* Corresponding tx,rx queues share irq */
#define VMXNET3_INTR_TXSHARE       1    /* All tx queues share one irq */
#define VMXNET3_INTR_DONTSHARE     2    /* each queue has its own irq */


#define VMXNET3_STATE_BIT_RESETTING   0
#define VMXNET3_STATE_BIT_QUIESCED    1
struct vmxnet3_adapter {
	struct vmxnet3_tx_queue		tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
	struct vmxnet3_rx_queue		rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
	struct vlan_group		*vlan_grp;
	struct vmxnet3_intr		intr;
	spinlock_t			cmd_lock;
	struct Vmxnet3_DriverShared	*shared;
	struct Vmxnet3_PMConf		*pm_conf;
	struct Vmxnet3_TxQueueDesc	*tqd_start;	/* all tx queue desc */
	struct Vmxnet3_RxQueueDesc	*rqd_start;	/* all rx queue desc */
	struct net_device		*netdev;
	struct net_device_stats		net_stats;
	struct pci_dev			*pdev;

	u8			__iomem *hw_addr0; /* for BAR 0 */
	u8			__iomem *hw_addr1; /* for BAR 1 */

#ifdef VMXNET3_RSS
	struct UPT1_RSSConf		*rss_conf;
	bool				rss;
#endif
	u32				num_rx_queues;
	u32				num_tx_queues;

	/* rx buffer related */
	unsigned	skb_buf_size;
	int		rx_buf_per_pkt;  /* only applies to the 1st ring */
	dma_addr_t	shared_pa;
	dma_addr_t	queue_desc_pa;

	/* Wake-on-LAN */
	u32	wol;

	/* Link speed */
	u32	link_speed; /* in mbps */

	u64	tx_timeout_count;
	struct work_struct work;

	unsigned long	state;    /* VMXNET3_STATE_BIT_xxx */

	int	dev_number;
	int	share_intr;
};

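/*
 * Register access helpers. BAR 0 maps the frequently-touched registers
 * (interrupt mask, tx/rx producer indices); BAR 1 maps configuration and
 * command registers. For example, the tx path kicks the device roughly like
 * the sketch below (register name from vmxnet3_defs.h, shown for illustration):
 *
 *	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
 *			       tq->tx_ring.next2fill);
 */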
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val)  \
	writel((val), (adapter)->hw_addr0 + (reg))
#define VMXNET3_READ_BAR0_REG(adapter, reg)        \
	readl((adapter)->hw_addr0 + (reg))

#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val)  \
	writel((val), (adapter)->hw_addr1 + (reg))
#define VMXNET3_READ_BAR1_REG(adapter, reg)        \
	readl((adapter)->hw_addr1 + (reg))

#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq)  (5)
#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
	((rq)->rx_ring[ring_idx].size >> 3)

#define VMXNET3_GET_ADDR_LO(dma)   ((u32)(dma))
#define VMXNET3_GET_ADDR_HI(dma)   ((u32)(((u64)(dma)) >> 32))

/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE    512
#define VMXNET3_DEF_RX_RING_SIZE    256

#define VMXNET3_MAX_ETH_HDR_SIZE    22
#define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)

int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);

int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter);

void
vmxnet3_force_close(struct vmxnet3_adapter *adapter);

void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);

void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);

void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);

int
vmxnet3_set_features(struct net_device *netdev, u32 features);

int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
		      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);

extern void vmxnet3_set_ethtool_ops(struct net_device *netdev);
extern struct net_device_stats *vmxnet3_get_stats(struct net_device *netdev);

extern char vmxnet3_driver_name[];
#endif