blob: 4051ec404613c4788485f58125f9d76888a1dc69 [file] [log] [blame]
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
Mitch A Williams2a06ed92012-01-17 04:09:05 +00004 Copyright(c) 2009 - 2012 Intel Corporation.
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00005
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
Jeff Kirshera4ba8cb2011-10-21 19:42:26 +000028#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
Alexander Duyckd4e0fe02009-04-07 14:37:34 +000030#include <linux/module.h>
31#include <linux/types.h>
32#include <linux/init.h>
33#include <linux/pci.h>
34#include <linux/vmalloc.h>
35#include <linux/pagemap.h>
36#include <linux/delay.h>
37#include <linux/netdevice.h>
38#include <linux/tcp.h>
39#include <linux/ipv6.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090040#include <linux/slab.h>
Alexander Duyckd4e0fe02009-04-07 14:37:34 +000041#include <net/checksum.h>
42#include <net/ip6_checksum.h>
43#include <linux/mii.h>
44#include <linux/ethtool.h>
45#include <linux/if_vlan.h>
Paul Gortmaker70c71602011-05-22 16:47:17 -040046#include <linux/prefetch.h>
Alexander Duyckd4e0fe02009-04-07 14:37:34 +000047
48#include "igbvf.h"
49
Williams, Mitch A7d94eb82011-10-18 06:39:43 +000050#define DRV_VERSION "2.0.1-k"
Alexander Duyckd4e0fe02009-04-07 14:37:34 +000051char igbvf_driver_name[] = "igbvf";
52const char igbvf_driver_version[] = DRV_VERSION;
53static const char igbvf_driver_string[] =
Williams, Mitch A10090752011-10-18 06:39:37 +000054 "Intel(R) Gigabit Virtual Function Network Driver";
Greg Rose2c20ebb2010-11-16 19:41:35 -080055static const char igbvf_copyright[] =
Mitch A Williams2a06ed92012-01-17 04:09:05 +000056 "Copyright (c) 2009 - 2012 Intel Corporation.";
Alexander Duyckd4e0fe02009-04-07 14:37:34 +000057
stephen hemmingerb3f4d592012-03-13 06:04:20 +000058#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
59static int debug = -1;
60module_param(debug, int, 0);
61MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
62
Alexander Duyckd4e0fe02009-04-07 14:37:34 +000063static int igbvf_poll(struct napi_struct *napi, int budget);
Alexander Duyck2d165772009-04-09 22:49:20 +000064static void igbvf_reset(struct igbvf_adapter *);
65static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
66static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +000067
68static struct igbvf_info igbvf_vf_info = {
69 .mac = e1000_vfadapt,
Alexander Duyck0364d6f2009-05-06 10:25:01 +000070 .flags = 0,
Alexander Duyckd4e0fe02009-04-07 14:37:34 +000071 .pba = 10,
72 .init_ops = e1000_init_function_pointers_vf,
73};
74
Williams, Mitch A031d7952010-12-09 03:23:56 +000075static struct igbvf_info igbvf_i350_vf_info = {
76 .mac = e1000_vfadapt_i350,
77 .flags = 0,
78 .pba = 10,
79 .init_ops = e1000_init_function_pointers_vf,
80};
81
Alexander Duyckd4e0fe02009-04-07 14:37:34 +000082static const struct igbvf_info *igbvf_info_tbl[] = {
83 [board_vf] = &igbvf_vf_info,
Williams, Mitch A031d7952010-12-09 03:23:56 +000084 [board_i350_vf] = &igbvf_i350_vf_info,
Alexander Duyckd4e0fe02009-04-07 14:37:34 +000085};
86
87/**
88 * igbvf_desc_unused - calculate if we have unused descriptors
89 **/
90static int igbvf_desc_unused(struct igbvf_ring *ring)
91{
92 if (ring->next_to_clean > ring->next_to_use)
93 return ring->next_to_clean - ring->next_to_use - 1;
94
95 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
96}
97
98/**
99 * igbvf_receive_skb - helper function to handle Rx indications
100 * @adapter: board private structure
101 * @status: descriptor status field as written by hardware
102 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
103 * @skb: pointer to sk_buff to be indicated to stack
104 **/
105static void igbvf_receive_skb(struct igbvf_adapter *adapter,
106 struct net_device *netdev,
107 struct sk_buff *skb,
108 u32 status, u16 vlan)
109{
Jiri Pirkoa0f1d602011-07-21 06:30:00 +0000110 if (status & E1000_RXD_STAT_VP) {
111 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
Greg Rose4d2d55a2011-08-27 06:24:59 +0000112 if (test_bit(vid, adapter->active_vlans))
113 __vlan_hwaccel_put_tag(skb, vid);
Jiri Pirkoa0f1d602011-07-21 06:30:00 +0000114 }
115 netif_receive_skb(skb);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000116}
117
118static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
119 u32 status_err, struct sk_buff *skb)
120{
Eric Dumazetbc8acf22010-09-02 13:07:41 -0700121 skb_checksum_none_assert(skb);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000122
123 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
Alexander Duyck0364d6f2009-05-06 10:25:01 +0000124 if ((status_err & E1000_RXD_STAT_IXSM) ||
125 (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000126 return;
Alexander Duyck0364d6f2009-05-06 10:25:01 +0000127
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000128 /* TCP/UDP checksum error bit is set */
129 if (status_err &
130 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
131 /* let the stack verify checksum errors */
132 adapter->hw_csum_err++;
133 return;
134 }
Alexander Duyck0364d6f2009-05-06 10:25:01 +0000135
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000136 /* It must be a TCP or UDP packet with a valid checksum */
137 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
138 skb->ip_summed = CHECKSUM_UNNECESSARY;
139
140 adapter->hw_csum_good++;
141}
142
143/**
144 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
145 * @rx_ring: address of ring structure to repopulate
146 * @cleaned_count: number of buffers to repopulate
147 **/
148static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
149 int cleaned_count)
150{
151 struct igbvf_adapter *adapter = rx_ring->adapter;
152 struct net_device *netdev = adapter->netdev;
153 struct pci_dev *pdev = adapter->pdev;
154 union e1000_adv_rx_desc *rx_desc;
155 struct igbvf_buffer *buffer_info;
156 struct sk_buff *skb;
157 unsigned int i;
158 int bufsz;
159
160 i = rx_ring->next_to_use;
161 buffer_info = &rx_ring->buffer_info[i];
162
163 if (adapter->rx_ps_hdr_size)
164 bufsz = adapter->rx_ps_hdr_size;
165 else
166 bufsz = adapter->rx_buffer_len;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000167
168 while (cleaned_count--) {
169 rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
170
171 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
172 if (!buffer_info->page) {
173 buffer_info->page = alloc_page(GFP_ATOMIC);
174 if (!buffer_info->page) {
175 adapter->alloc_rx_buff_failed++;
176 goto no_buffers;
177 }
178 buffer_info->page_offset = 0;
179 } else {
180 buffer_info->page_offset ^= PAGE_SIZE / 2;
181 }
182 buffer_info->page_dma =
Nick Nunley123e9f12010-04-27 13:09:44 +0000183 dma_map_page(&pdev->dev, buffer_info->page,
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000184 buffer_info->page_offset,
185 PAGE_SIZE / 2,
Nick Nunley123e9f12010-04-27 13:09:44 +0000186 DMA_FROM_DEVICE);
Greg Rose91ffb8e2012-09-21 00:21:39 +0000187 if (dma_mapping_error(&pdev->dev,
188 buffer_info->page_dma)) {
189 __free_page(buffer_info->page);
190 buffer_info->page = NULL;
191 dev_err(&pdev->dev, "RX DMA map failed\n");
192 break;
193 }
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000194 }
195
196 if (!buffer_info->skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +0000197 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000198 if (!skb) {
199 adapter->alloc_rx_buff_failed++;
200 goto no_buffers;
201 }
202
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000203 buffer_info->skb = skb;
Nick Nunley123e9f12010-04-27 13:09:44 +0000204 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000205 bufsz,
Nick Nunley123e9f12010-04-27 13:09:44 +0000206 DMA_FROM_DEVICE);
Greg Rose91ffb8e2012-09-21 00:21:39 +0000207 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
208 dev_kfree_skb(buffer_info->skb);
209 buffer_info->skb = NULL;
210 dev_err(&pdev->dev, "RX DMA map failed\n");
211 goto no_buffers;
212 }
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000213 }
214 /* Refresh the desc even if buffer_addrs didn't change because
215 * each write-back erases this info. */
216 if (adapter->rx_ps_hdr_size) {
217 rx_desc->read.pkt_addr =
218 cpu_to_le64(buffer_info->page_dma);
219 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
220 } else {
221 rx_desc->read.pkt_addr =
222 cpu_to_le64(buffer_info->dma);
223 rx_desc->read.hdr_addr = 0;
224 }
225
226 i++;
227 if (i == rx_ring->count)
228 i = 0;
229 buffer_info = &rx_ring->buffer_info[i];
230 }
231
232no_buffers:
233 if (rx_ring->next_to_use != i) {
234 rx_ring->next_to_use = i;
235 if (i == 0)
236 i = (rx_ring->count - 1);
237 else
238 i--;
239
240 /* Force memory writes to complete before letting h/w
241 * know there are new descriptors to fetch. (Only
242 * applicable for weak-ordered memory model archs,
243 * such as IA-64). */
244 wmb();
245 writel(i, adapter->hw.hw_addr + rx_ring->tail);
246 }
247}
248
249/**
250 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
251 * @adapter: board private structure
252 *
253 * the return value indicates whether actual cleaning was done, there
254 * is no guarantee that everything was cleaned
255 **/
256static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
257 int *work_done, int work_to_do)
258{
259 struct igbvf_ring *rx_ring = adapter->rx_ring;
260 struct net_device *netdev = adapter->netdev;
261 struct pci_dev *pdev = adapter->pdev;
262 union e1000_adv_rx_desc *rx_desc, *next_rxd;
263 struct igbvf_buffer *buffer_info, *next_buffer;
264 struct sk_buff *skb;
265 bool cleaned = false;
266 int cleaned_count = 0;
267 unsigned int total_bytes = 0, total_packets = 0;
268 unsigned int i;
269 u32 length, hlen, staterr;
270
271 i = rx_ring->next_to_clean;
272 rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
273 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
274
275 while (staterr & E1000_RXD_STAT_DD) {
276 if (*work_done >= work_to_do)
277 break;
278 (*work_done)++;
Jeff Kirsher2d0bb1c2010-08-08 16:02:31 +0000279 rmb(); /* read descriptor and rx_buffer_info after status DD */
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000280
281 buffer_info = &rx_ring->buffer_info[i];
282
283 /* HW will not DMA in data larger than the given buffer, even
284 * if it parses the (NFS, of course) header to be larger. In
285 * that case, it fills the header buffer and spills the rest
286 * into the page.
287 */
288 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
289 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
290 if (hlen > adapter->rx_ps_hdr_size)
291 hlen = adapter->rx_ps_hdr_size;
292
293 length = le16_to_cpu(rx_desc->wb.upper.length);
294 cleaned = true;
295 cleaned_count++;
296
297 skb = buffer_info->skb;
298 prefetch(skb->data - NET_IP_ALIGN);
299 buffer_info->skb = NULL;
300 if (!adapter->rx_ps_hdr_size) {
Nick Nunley123e9f12010-04-27 13:09:44 +0000301 dma_unmap_single(&pdev->dev, buffer_info->dma,
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000302 adapter->rx_buffer_len,
Nick Nunley123e9f12010-04-27 13:09:44 +0000303 DMA_FROM_DEVICE);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000304 buffer_info->dma = 0;
305 skb_put(skb, length);
306 goto send_up;
307 }
308
309 if (!skb_shinfo(skb)->nr_frags) {
Nick Nunley123e9f12010-04-27 13:09:44 +0000310 dma_unmap_single(&pdev->dev, buffer_info->dma,
Alexander Duyck92d947b2009-07-23 18:11:01 +0000311 adapter->rx_ps_hdr_size,
Nick Nunley123e9f12010-04-27 13:09:44 +0000312 DMA_FROM_DEVICE);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000313 skb_put(skb, hlen);
314 }
315
316 if (length) {
Nick Nunley123e9f12010-04-27 13:09:44 +0000317 dma_unmap_page(&pdev->dev, buffer_info->page_dma,
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000318 PAGE_SIZE / 2,
Nick Nunley123e9f12010-04-27 13:09:44 +0000319 DMA_FROM_DEVICE);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000320 buffer_info->page_dma = 0;
321
Koki Sanagiec857fd2010-04-27 01:01:39 +0000322 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000323 buffer_info->page,
324 buffer_info->page_offset,
325 length);
326
327 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
328 (page_count(buffer_info->page) != 1))
329 buffer_info->page = NULL;
330 else
331 get_page(buffer_info->page);
332
333 skb->len += length;
334 skb->data_len += length;
Eric Dumazet7b8b5962011-10-20 09:22:18 +0000335 skb->truesize += PAGE_SIZE / 2;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000336 }
337send_up:
338 i++;
339 if (i == rx_ring->count)
340 i = 0;
341 next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
342 prefetch(next_rxd);
343 next_buffer = &rx_ring->buffer_info[i];
344
345 if (!(staterr & E1000_RXD_STAT_EOP)) {
346 buffer_info->skb = next_buffer->skb;
347 buffer_info->dma = next_buffer->dma;
348 next_buffer->skb = skb;
349 next_buffer->dma = 0;
350 goto next_desc;
351 }
352
353 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
354 dev_kfree_skb_irq(skb);
355 goto next_desc;
356 }
357
358 total_bytes += skb->len;
359 total_packets++;
360
361 igbvf_rx_checksum_adv(adapter, staterr, skb);
362
363 skb->protocol = eth_type_trans(skb, netdev);
364
365 igbvf_receive_skb(adapter, netdev, skb, staterr,
366 rx_desc->wb.upper.vlan);
367
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000368next_desc:
369 rx_desc->wb.upper.status_error = 0;
370
371 /* return some buffers to hardware, one at a time is too slow */
372 if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
373 igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
374 cleaned_count = 0;
375 }
376
377 /* use prefetched values */
378 rx_desc = next_rxd;
379 buffer_info = next_buffer;
380
381 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
382 }
383
384 rx_ring->next_to_clean = i;
385 cleaned_count = igbvf_desc_unused(rx_ring);
386
387 if (cleaned_count)
388 igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
389
390 adapter->total_rx_packets += total_packets;
391 adapter->total_rx_bytes += total_bytes;
392 adapter->net_stats.rx_bytes += total_bytes;
393 adapter->net_stats.rx_packets += total_packets;
394 return cleaned;
395}
396
397static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
398 struct igbvf_buffer *buffer_info)
399{
Alexander Duycka7d5ca402009-12-02 16:47:37 +0000400 if (buffer_info->dma) {
401 if (buffer_info->mapped_as_page)
Nick Nunley123e9f12010-04-27 13:09:44 +0000402 dma_unmap_page(&adapter->pdev->dev,
Alexander Duycka7d5ca402009-12-02 16:47:37 +0000403 buffer_info->dma,
404 buffer_info->length,
Nick Nunley123e9f12010-04-27 13:09:44 +0000405 DMA_TO_DEVICE);
Alexander Duycka7d5ca402009-12-02 16:47:37 +0000406 else
Nick Nunley123e9f12010-04-27 13:09:44 +0000407 dma_unmap_single(&adapter->pdev->dev,
Alexander Duycka7d5ca402009-12-02 16:47:37 +0000408 buffer_info->dma,
409 buffer_info->length,
Nick Nunley123e9f12010-04-27 13:09:44 +0000410 DMA_TO_DEVICE);
Alexander Duycka7d5ca402009-12-02 16:47:37 +0000411 buffer_info->dma = 0;
412 }
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000413 if (buffer_info->skb) {
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000414 dev_kfree_skb_any(buffer_info->skb);
415 buffer_info->skb = NULL;
416 }
417 buffer_info->time_stamp = 0;
418}
419
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000420/**
421 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
422 * @adapter: board private structure
423 *
424 * Return 0 on success, negative on failure
425 **/
426int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
427 struct igbvf_ring *tx_ring)
428{
429 struct pci_dev *pdev = adapter->pdev;
430 int size;
431
432 size = sizeof(struct igbvf_buffer) * tx_ring->count;
Eric Dumazet89bf67f2010-11-22 00:15:06 +0000433 tx_ring->buffer_info = vzalloc(size);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000434 if (!tx_ring->buffer_info)
435 goto err;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000436
437 /* round up to nearest 4K */
438 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
439 tx_ring->size = ALIGN(tx_ring->size, 4096);
440
Nick Nunley123e9f12010-04-27 13:09:44 +0000441 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
442 &tx_ring->dma, GFP_KERNEL);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000443
444 if (!tx_ring->desc)
445 goto err;
446
447 tx_ring->adapter = adapter;
448 tx_ring->next_to_use = 0;
449 tx_ring->next_to_clean = 0;
450
451 return 0;
452err:
453 vfree(tx_ring->buffer_info);
454 dev_err(&adapter->pdev->dev,
455 "Unable to allocate memory for the transmit descriptor ring\n");
456 return -ENOMEM;
457}
458
459/**
460 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
461 * @adapter: board private structure
462 *
463 * Returns 0 on success, negative on failure
464 **/
465int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
466 struct igbvf_ring *rx_ring)
467{
468 struct pci_dev *pdev = adapter->pdev;
469 int size, desc_len;
470
471 size = sizeof(struct igbvf_buffer) * rx_ring->count;
Eric Dumazet89bf67f2010-11-22 00:15:06 +0000472 rx_ring->buffer_info = vzalloc(size);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000473 if (!rx_ring->buffer_info)
474 goto err;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000475
476 desc_len = sizeof(union e1000_adv_rx_desc);
477
478 /* Round up to nearest 4K */
479 rx_ring->size = rx_ring->count * desc_len;
480 rx_ring->size = ALIGN(rx_ring->size, 4096);
481
Nick Nunley123e9f12010-04-27 13:09:44 +0000482 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
483 &rx_ring->dma, GFP_KERNEL);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000484
485 if (!rx_ring->desc)
486 goto err;
487
488 rx_ring->next_to_clean = 0;
489 rx_ring->next_to_use = 0;
490
491 rx_ring->adapter = adapter;
492
493 return 0;
494
495err:
496 vfree(rx_ring->buffer_info);
497 rx_ring->buffer_info = NULL;
498 dev_err(&adapter->pdev->dev,
499 "Unable to allocate memory for the receive descriptor ring\n");
500 return -ENOMEM;
501}
502
503/**
504 * igbvf_clean_tx_ring - Free Tx Buffers
505 * @tx_ring: ring to be cleaned
506 **/
507static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
508{
509 struct igbvf_adapter *adapter = tx_ring->adapter;
510 struct igbvf_buffer *buffer_info;
511 unsigned long size;
512 unsigned int i;
513
514 if (!tx_ring->buffer_info)
515 return;
516
517 /* Free all the Tx ring sk_buffs */
518 for (i = 0; i < tx_ring->count; i++) {
519 buffer_info = &tx_ring->buffer_info[i];
520 igbvf_put_txbuf(adapter, buffer_info);
521 }
522
523 size = sizeof(struct igbvf_buffer) * tx_ring->count;
524 memset(tx_ring->buffer_info, 0, size);
525
526 /* Zero out the descriptor ring */
527 memset(tx_ring->desc, 0, tx_ring->size);
528
529 tx_ring->next_to_use = 0;
530 tx_ring->next_to_clean = 0;
531
532 writel(0, adapter->hw.hw_addr + tx_ring->head);
533 writel(0, adapter->hw.hw_addr + tx_ring->tail);
534}
535
536/**
537 * igbvf_free_tx_resources - Free Tx Resources per Queue
538 * @tx_ring: ring to free resources from
539 *
540 * Free all transmit software resources
541 **/
542void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
543{
544 struct pci_dev *pdev = tx_ring->adapter->pdev;
545
546 igbvf_clean_tx_ring(tx_ring);
547
548 vfree(tx_ring->buffer_info);
549 tx_ring->buffer_info = NULL;
550
Nick Nunley123e9f12010-04-27 13:09:44 +0000551 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
552 tx_ring->dma);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000553
554 tx_ring->desc = NULL;
555}
556
557/**
558 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
559 * @adapter: board private structure
560 **/
561static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
562{
563 struct igbvf_adapter *adapter = rx_ring->adapter;
564 struct igbvf_buffer *buffer_info;
565 struct pci_dev *pdev = adapter->pdev;
566 unsigned long size;
567 unsigned int i;
568
569 if (!rx_ring->buffer_info)
570 return;
571
572 /* Free all the Rx ring sk_buffs */
573 for (i = 0; i < rx_ring->count; i++) {
574 buffer_info = &rx_ring->buffer_info[i];
575 if (buffer_info->dma) {
576 if (adapter->rx_ps_hdr_size){
Nick Nunley123e9f12010-04-27 13:09:44 +0000577 dma_unmap_single(&pdev->dev, buffer_info->dma,
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000578 adapter->rx_ps_hdr_size,
Nick Nunley123e9f12010-04-27 13:09:44 +0000579 DMA_FROM_DEVICE);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000580 } else {
Nick Nunley123e9f12010-04-27 13:09:44 +0000581 dma_unmap_single(&pdev->dev, buffer_info->dma,
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000582 adapter->rx_buffer_len,
Nick Nunley123e9f12010-04-27 13:09:44 +0000583 DMA_FROM_DEVICE);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000584 }
585 buffer_info->dma = 0;
586 }
587
588 if (buffer_info->skb) {
589 dev_kfree_skb(buffer_info->skb);
590 buffer_info->skb = NULL;
591 }
592
593 if (buffer_info->page) {
594 if (buffer_info->page_dma)
Nick Nunley123e9f12010-04-27 13:09:44 +0000595 dma_unmap_page(&pdev->dev,
596 buffer_info->page_dma,
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000597 PAGE_SIZE / 2,
Nick Nunley123e9f12010-04-27 13:09:44 +0000598 DMA_FROM_DEVICE);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000599 put_page(buffer_info->page);
600 buffer_info->page = NULL;
601 buffer_info->page_dma = 0;
602 buffer_info->page_offset = 0;
603 }
604 }
605
606 size = sizeof(struct igbvf_buffer) * rx_ring->count;
607 memset(rx_ring->buffer_info, 0, size);
608
609 /* Zero out the descriptor ring */
610 memset(rx_ring->desc, 0, rx_ring->size);
611
612 rx_ring->next_to_clean = 0;
613 rx_ring->next_to_use = 0;
614
615 writel(0, adapter->hw.hw_addr + rx_ring->head);
616 writel(0, adapter->hw.hw_addr + rx_ring->tail);
617}
618
619/**
620 * igbvf_free_rx_resources - Free Rx Resources
621 * @rx_ring: ring to clean the resources from
622 *
623 * Free all receive software resources
624 **/
625
626void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
627{
628 struct pci_dev *pdev = rx_ring->adapter->pdev;
629
630 igbvf_clean_rx_ring(rx_ring);
631
632 vfree(rx_ring->buffer_info);
633 rx_ring->buffer_info = NULL;
634
635 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
636 rx_ring->dma);
637 rx_ring->desc = NULL;
638}
639
640/**
641 * igbvf_update_itr - update the dynamic ITR value based on statistics
642 * @adapter: pointer to adapter
643 * @itr_setting: current adapter->itr
644 * @packets: the number of packets during this measurement interval
645 * @bytes: the number of bytes during this measurement interval
646 *
647 * Stores a new ITR value based on packets and byte
648 * counts during the last interrupt. The advantage of per interrupt
649 * computation is faster updates and more accurate ITR for the current
650 * traffic pattern. Constants in this function were computed
651 * based on theoretical maximum wire speed and thresholds were set based
652 * on testing data as well as attempting to minimize response time
Mitch A Williamsab50a2a2012-01-14 08:10:50 +0000653 * while increasing bulk throughput.
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000654 **/
Mitch A Williamsab50a2a2012-01-14 08:10:50 +0000655static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
656 enum latency_range itr_setting,
657 int packets, int bytes)
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000658{
Mitch A Williamsab50a2a2012-01-14 08:10:50 +0000659 enum latency_range retval = itr_setting;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000660
661 if (packets == 0)
662 goto update_itr_done;
663
664 switch (itr_setting) {
665 case lowest_latency:
666 /* handle TSO and jumbo frames */
667 if (bytes/packets > 8000)
668 retval = bulk_latency;
669 else if ((packets < 5) && (bytes > 512))
670 retval = low_latency;
671 break;
672 case low_latency: /* 50 usec aka 20000 ints/s */
673 if (bytes > 10000) {
674 /* this if handles the TSO accounting */
675 if (bytes/packets > 8000)
676 retval = bulk_latency;
677 else if ((packets < 10) || ((bytes/packets) > 1200))
678 retval = bulk_latency;
679 else if ((packets > 35))
680 retval = lowest_latency;
681 } else if (bytes/packets > 2000) {
682 retval = bulk_latency;
683 } else if (packets <= 2 && bytes < 512) {
684 retval = lowest_latency;
685 }
686 break;
687 case bulk_latency: /* 250 usec aka 4000 ints/s */
688 if (bytes > 25000) {
689 if (packets > 35)
690 retval = low_latency;
691 } else if (bytes < 6000) {
692 retval = low_latency;
693 }
694 break;
Mitch A Williamsab50a2a2012-01-14 08:10:50 +0000695 default:
696 break;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000697 }
698
699update_itr_done:
700 return retval;
701}
702
Mitch A Williamsab50a2a2012-01-14 08:10:50 +0000703static int igbvf_range_to_itr(enum latency_range current_range)
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000704{
Mitch A Williamsab50a2a2012-01-14 08:10:50 +0000705 int new_itr;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000706
Mitch A Williamsab50a2a2012-01-14 08:10:50 +0000707 switch (current_range) {
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000708 /* counts and packets in update_itr are dependent on these numbers */
709 case lowest_latency:
Mitch A Williamsab50a2a2012-01-14 08:10:50 +0000710 new_itr = IGBVF_70K_ITR;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000711 break;
712 case low_latency:
Mitch A Williamsab50a2a2012-01-14 08:10:50 +0000713 new_itr = IGBVF_20K_ITR;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000714 break;
715 case bulk_latency:
Mitch A Williamsab50a2a2012-01-14 08:10:50 +0000716 new_itr = IGBVF_4K_ITR;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000717 break;
718 default:
Mitch A Williamsab50a2a2012-01-14 08:10:50 +0000719 new_itr = IGBVF_START_ITR;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000720 break;
721 }
Mitch A Williamsab50a2a2012-01-14 08:10:50 +0000722 return new_itr;
723}
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000724
Mitch A Williamsab50a2a2012-01-14 08:10:50 +0000725static void igbvf_set_itr(struct igbvf_adapter *adapter)
726{
727 u32 new_itr;
728
729 adapter->tx_ring->itr_range =
730 igbvf_update_itr(adapter,
731 adapter->tx_ring->itr_val,
732 adapter->total_tx_packets,
733 adapter->total_tx_bytes);
734
735 /* conservative mode (itr 3) eliminates the lowest_latency setting */
736 if (adapter->requested_itr == 3 &&
737 adapter->tx_ring->itr_range == lowest_latency)
738 adapter->tx_ring->itr_range = low_latency;
739
740 new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);
741
742
743 if (new_itr != adapter->tx_ring->itr_val) {
744 u32 current_itr = adapter->tx_ring->itr_val;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000745 /*
746 * this attempts to bias the interrupt rate towards Bulk
747 * by adding intermediate steps when interrupt rate is
748 * increasing
749 */
Mitch A Williamsab50a2a2012-01-14 08:10:50 +0000750 new_itr = new_itr > current_itr ?
751 min(current_itr + (new_itr >> 2), new_itr) :
752 new_itr;
753 adapter->tx_ring->itr_val = new_itr;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000754
Mitch A Williamsab50a2a2012-01-14 08:10:50 +0000755 adapter->tx_ring->set_itr = 1;
756 }
757
758 adapter->rx_ring->itr_range =
759 igbvf_update_itr(adapter, adapter->rx_ring->itr_val,
760 adapter->total_rx_packets,
761 adapter->total_rx_bytes);
762 if (adapter->requested_itr == 3 &&
763 adapter->rx_ring->itr_range == lowest_latency)
764 adapter->rx_ring->itr_range = low_latency;
765
766 new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range);
767
768 if (new_itr != adapter->rx_ring->itr_val) {
769 u32 current_itr = adapter->rx_ring->itr_val;
770 new_itr = new_itr > current_itr ?
771 min(current_itr + (new_itr >> 2), new_itr) :
772 new_itr;
773 adapter->rx_ring->itr_val = new_itr;
774
775 adapter->rx_ring->set_itr = 1;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000776 }
777}
778
779/**
780 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
781 * @adapter: board private structure
Ben Hutchings49ce9c22012-07-10 10:56:00 +0000782 *
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000783 * returns true if ring is completely cleaned
784 **/
785static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
786{
787 struct igbvf_adapter *adapter = tx_ring->adapter;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000788 struct net_device *netdev = adapter->netdev;
789 struct igbvf_buffer *buffer_info;
790 struct sk_buff *skb;
791 union e1000_adv_tx_desc *tx_desc, *eop_desc;
792 unsigned int total_bytes = 0, total_packets = 0;
793 unsigned int i, eop, count = 0;
794 bool cleaned = false;
795
796 i = tx_ring->next_to_clean;
797 eop = tx_ring->buffer_info[i].next_to_watch;
798 eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
799
800 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
801 (count < tx_ring->count)) {
Jeff Kirsher2d0bb1c2010-08-08 16:02:31 +0000802 rmb(); /* read buffer_info after eop_desc status */
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000803 for (cleaned = false; !cleaned; count++) {
804 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
805 buffer_info = &tx_ring->buffer_info[i];
806 cleaned = (i == eop);
807 skb = buffer_info->skb;
808
809 if (skb) {
810 unsigned int segs, bytecount;
811
812 /* gso_segs is currently only valid for tcp */
813 segs = skb_shinfo(skb)->gso_segs ?: 1;
814 /* multiply data chunks by size of headers */
815 bytecount = ((segs - 1) * skb_headlen(skb)) +
816 skb->len;
817 total_packets += segs;
818 total_bytes += bytecount;
819 }
820
821 igbvf_put_txbuf(adapter, buffer_info);
822 tx_desc->wb.status = 0;
823
824 i++;
825 if (i == tx_ring->count)
826 i = 0;
827 }
828 eop = tx_ring->buffer_info[i].next_to_watch;
829 eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
830 }
831
832 tx_ring->next_to_clean = i;
833
834 if (unlikely(count &&
835 netif_carrier_ok(netdev) &&
836 igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
837 /* Make sure that anybody stopping the queue after this
838 * sees the new next_to_clean.
839 */
840 smp_mb();
841 if (netif_queue_stopped(netdev) &&
842 !(test_bit(__IGBVF_DOWN, &adapter->state))) {
843 netif_wake_queue(netdev);
844 ++adapter->restart_queue;
845 }
846 }
847
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000848 adapter->net_stats.tx_bytes += total_bytes;
849 adapter->net_stats.tx_packets += total_packets;
Eric Dumazet807540b2010-09-23 05:40:09 +0000850 return count < tx_ring->count;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000851}
852
853static irqreturn_t igbvf_msix_other(int irq, void *data)
854{
855 struct net_device *netdev = data;
856 struct igbvf_adapter *adapter = netdev_priv(netdev);
857 struct e1000_hw *hw = &adapter->hw;
858
859 adapter->int_counter1++;
860
861 netif_carrier_off(netdev);
862 hw->mac.get_link_status = 1;
863 if (!test_bit(__IGBVF_DOWN, &adapter->state))
864 mod_timer(&adapter->watchdog_timer, jiffies + 1);
865
866 ew32(EIMS, adapter->eims_other);
867
868 return IRQ_HANDLED;
869}
870
871static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
872{
873 struct net_device *netdev = data;
874 struct igbvf_adapter *adapter = netdev_priv(netdev);
875 struct e1000_hw *hw = &adapter->hw;
876 struct igbvf_ring *tx_ring = adapter->tx_ring;
877
Mitch A Williamsab50a2a2012-01-14 08:10:50 +0000878 if (tx_ring->set_itr) {
879 writel(tx_ring->itr_val,
880 adapter->hw.hw_addr + tx_ring->itr_register);
881 adapter->tx_ring->set_itr = 0;
882 }
Alexander Duyckd4e0fe02009-04-07 14:37:34 +0000883
884 adapter->total_tx_bytes = 0;
885 adapter->total_tx_packets = 0;
886
887 /* auto mask will automatically reenable the interrupt when we write
888 * EICS */
889 if (!igbvf_clean_tx_irq(tx_ring))
890 /* Ring was not completely cleaned, so fire another interrupt */
891 ew32(EICS, tx_ring->eims_value);
892 else
893 ew32(EIMS, tx_ring->eims_value);
894
895 return IRQ_HANDLED;
896}
897
898static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
899{
900 struct net_device *netdev = data;
901 struct igbvf_adapter *adapter = netdev_priv(netdev);
902
903 adapter->int_counter0++;
904
905 /* Write the ITR value calculated at the end of the
906 * previous interrupt.
907 */
908 if (adapter->rx_ring->set_itr) {
909 writel(adapter->rx_ring->itr_val,
910 adapter->hw.hw_addr + adapter->rx_ring->itr_register);
911 adapter->rx_ring->set_itr = 0;
912 }
913
914 if (napi_schedule_prep(&adapter->rx_ring->napi)) {
915 adapter->total_rx_bytes = 0;
916 adapter->total_rx_packets = 0;
917 __napi_schedule(&adapter->rx_ring->napi);
918 }
919
920 return IRQ_HANDLED;
921}
922
923#define IGBVF_NO_QUEUE -1
924
/**
 * igbvf_assign_vector - map an Rx and/or Tx queue to an MSI-X vector
 * @adapter: board private structure
 * @rx_queue: Rx queue index to map, or IGBVF_NO_QUEUE to skip
 * @tx_queue: Tx queue index to map, or IGBVF_NO_QUEUE to skip
 * @msix_vector: MSI-X vector number written into the IVAR table entry
 *
 * Also records the matching EIMS bit in the ring so the interrupt
 * handlers can re-arm their own vector.
 **/
static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
                                int tx_queue, int msix_vector)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* 82576 uses a table-based method for assigning vectors.
	   Each queue has a single entry in the table to which we write
	   a vector number along with a "valid" bit. Sadly, the layout
	   of the table is somewhat counterintuitive. */
	if (rx_queue > IGBVF_NO_QUEUE) {
		/* each 32-bit IVAR register covers two queue pairs */
		index = (rx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (rx_queue & 0x1) {
			/* vector goes into third byte of register */
			ivar = ivar & 0xFF00FFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
		} else {
			/* vector goes into low byte of register */
			ivar = ivar & 0xFFFFFF00;
			ivar |= msix_vector | E1000_IVAR_VALID;
		}
		/* remember the EIMS bit for this vector */
		adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
	if (tx_queue > IGBVF_NO_QUEUE) {
		index = (tx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (tx_queue & 0x1) {
			/* vector goes into high byte of register */
			ivar = ivar & 0x00FFFFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
		} else {
			/* vector goes into second byte of register */
			ivar = ivar & 0xFFFF00FF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
		}
		adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
}
966
/**
 * igbvf_configure_msix - Configure MSI-X hardware
 *
 * igbvf_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.  Vector 0 is Tx, vector 1 is Rx and
 * vector 2 covers "other" causes (PF mailbox / link changes).
 **/
static void igbvf_configure_msix(struct igbvf_adapter *adapter)
{
	u32 tmp;
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	int vector = 0;

	adapter->eims_enable_mask = 0;

	/* vector 0: Tx queue; program its initial ITR */
	igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
	adapter->eims_enable_mask |= tx_ring->eims_value;
	writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register);
	/* vector 1: Rx queue; program its initial ITR */
	igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
	adapter->eims_enable_mask |= rx_ring->eims_value;
	writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register);

	/* set vector for other causes, i.e. link changes */

	tmp = (vector++ | E1000_IVAR_VALID);

	ew32(IVAR_MISC, tmp);

	/* NOTE(review): this overwrites the |= accumulation above; the
	 * final mask is simply the low 'vector' bits (all three vectors)
	 */
	adapter->eims_enable_mask = (1 << (vector)) - 1;
	adapter->eims_other = 1 << (vector - 1);
	e1e_flush();
}
1000
Alexander Duyck2d165772009-04-09 22:49:20 +00001001static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001002{
1003 if (adapter->msix_entries) {
1004 pci_disable_msix(adapter->pdev);
1005 kfree(adapter->msix_entries);
1006 adapter->msix_entries = NULL;
1007 }
1008}
1009
1010/**
1011 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
1012 *
1013 * Attempt to configure interrupts using the best available
1014 * capabilities of the hardware and kernel.
1015 **/
Alexander Duyck2d165772009-04-09 22:49:20 +00001016static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001017{
1018 int err = -ENOMEM;
1019 int i;
1020
1021 /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */
1022 adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
1023 GFP_KERNEL);
1024 if (adapter->msix_entries) {
1025 for (i = 0; i < 3; i++)
1026 adapter->msix_entries[i].entry = i;
1027
1028 err = pci_enable_msix(adapter->pdev,
1029 adapter->msix_entries, 3);
1030 }
1031
1032 if (err) {
1033 /* MSI-X failed */
1034 dev_err(&adapter->pdev->dev,
1035 "Failed to initialize MSI-X interrupts.\n");
1036 igbvf_reset_interrupt_capability(adapter);
1037 }
1038}
1039
1040/**
1041 * igbvf_request_msix - Initialize MSI-X interrupts
1042 *
1043 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
1044 * kernel.
1045 **/
1046static int igbvf_request_msix(struct igbvf_adapter *adapter)
1047{
1048 struct net_device *netdev = adapter->netdev;
1049 int err = 0, vector = 0;
1050
1051 if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
1052 sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
1053 sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
1054 } else {
1055 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1056 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1057 }
1058
1059 err = request_irq(adapter->msix_entries[vector].vector,
Joe Perchesa0607fd2009-11-18 23:29:17 -08001060 igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001061 netdev);
1062 if (err)
1063 goto out;
1064
1065 adapter->tx_ring->itr_register = E1000_EITR(vector);
Mitch A Williamsab50a2a2012-01-14 08:10:50 +00001066 adapter->tx_ring->itr_val = adapter->current_itr;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001067 vector++;
1068
1069 err = request_irq(adapter->msix_entries[vector].vector,
Joe Perchesa0607fd2009-11-18 23:29:17 -08001070 igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001071 netdev);
1072 if (err)
1073 goto out;
1074
1075 adapter->rx_ring->itr_register = E1000_EITR(vector);
Mitch A Williamsab50a2a2012-01-14 08:10:50 +00001076 adapter->rx_ring->itr_val = adapter->current_itr;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001077 vector++;
1078
1079 err = request_irq(adapter->msix_entries[vector].vector,
Joe Perchesa0607fd2009-11-18 23:29:17 -08001080 igbvf_msix_other, 0, netdev->name, netdev);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001081 if (err)
1082 goto out;
1083
1084 igbvf_configure_msix(adapter);
1085 return 0;
1086out:
1087 return err;
1088}
1089
1090/**
1091 * igbvf_alloc_queues - Allocate memory for all rings
1092 * @adapter: board private structure to initialize
1093 **/
1094static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
1095{
1096 struct net_device *netdev = adapter->netdev;
1097
1098 adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
1099 if (!adapter->tx_ring)
1100 return -ENOMEM;
1101
1102 adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
1103 if (!adapter->rx_ring) {
1104 kfree(adapter->tx_ring);
1105 return -ENOMEM;
1106 }
1107
1108 netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);
1109
1110 return 0;
1111}
1112
1113/**
1114 * igbvf_request_irq - initialize interrupts
1115 *
1116 * Attempts to configure interrupts using the best available
1117 * capabilities of the hardware and kernel.
1118 **/
1119static int igbvf_request_irq(struct igbvf_adapter *adapter)
1120{
1121 int err = -1;
1122
1123 /* igbvf supports msi-x only */
1124 if (adapter->msix_entries)
1125 err = igbvf_request_msix(adapter);
1126
1127 if (!err)
1128 return err;
1129
1130 dev_err(&adapter->pdev->dev,
1131 "Unable to allocate interrupt, Error: %d\n", err);
1132
1133 return err;
1134}
1135
1136static void igbvf_free_irq(struct igbvf_adapter *adapter)
1137{
1138 struct net_device *netdev = adapter->netdev;
1139 int vector;
1140
1141 if (adapter->msix_entries) {
1142 for (vector = 0; vector < 3; vector++)
1143 free_irq(adapter->msix_entries[vector].vector, netdev);
1144 }
1145}
1146
/**
 * igbvf_irq_disable - Mask off interrupt generation on the NIC
 **/
static void igbvf_irq_disable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* mask every interrupt cause */
	ew32(EIMC, ~0);

	/* clear EIAC too so no cause re-enables itself under MSI-X */
	if (adapter->msix_entries)
		ew32(EIAC, 0);
}
1159
/**
 * igbvf_irq_enable - Enable default interrupt generation settings
 *
 * Programs auto-clear (EIAC), auto-mask (EIAM) and the interrupt mask
 * (EIMS) with the full vector mask built in igbvf_configure_msix().
 **/
static void igbvf_irq_enable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIAC, adapter->eims_enable_mask);
	ew32(EIAM, adapter->eims_enable_mask);
	ew32(EIMS, adapter->eims_enable_mask);
}
1171
1172/**
1173 * igbvf_poll - NAPI Rx polling callback
1174 * @napi: struct associated with this polling callback
1175 * @budget: amount of packets driver is allowed to process this poll
1176 **/
1177static int igbvf_poll(struct napi_struct *napi, int budget)
1178{
1179 struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
1180 struct igbvf_adapter *adapter = rx_ring->adapter;
1181 struct e1000_hw *hw = &adapter->hw;
1182 int work_done = 0;
1183
1184 igbvf_clean_rx_irq(adapter, &work_done, budget);
1185
1186 /* If not enough Rx work done, exit the polling mode */
1187 if (work_done < budget) {
1188 napi_complete(napi);
1189
Mitch A Williamsab50a2a2012-01-14 08:10:50 +00001190 if (adapter->requested_itr & 3)
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001191 igbvf_set_itr(adapter);
1192
1193 if (!test_bit(__IGBVF_DOWN, &adapter->state))
1194 ew32(EIMS, adapter->rx_ring->eims_value);
1195 }
1196
1197 return work_done;
1198}
1199
1200/**
1201 * igbvf_set_rlpml - set receive large packet maximum length
1202 * @adapter: board private structure
1203 *
1204 * Configure the maximum size of packets that will be received
1205 */
1206static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
1207{
Jiri Pirkoa0f1d602011-07-21 06:30:00 +00001208 int max_frame_size;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001209 struct e1000_hw *hw = &adapter->hw;
1210
Jiri Pirkoa0f1d602011-07-21 06:30:00 +00001211 max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001212 e1000_rlpml_set_vf(hw, max_frame_size);
1213}
1214
Jiri Pirko8e586132011-12-08 19:52:37 -05001215static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001216{
1217 struct igbvf_adapter *adapter = netdev_priv(netdev);
1218 struct e1000_hw *hw = &adapter->hw;
1219
Jiri Pirko8e586132011-12-08 19:52:37 -05001220 if (hw->mac.ops.set_vfta(hw, vid, true)) {
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001221 dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
Jiri Pirko8e586132011-12-08 19:52:37 -05001222 return -EINVAL;
1223 }
1224 set_bit(vid, adapter->active_vlans);
1225 return 0;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001226}
1227
Jiri Pirko8e586132011-12-08 19:52:37 -05001228static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001229{
1230 struct igbvf_adapter *adapter = netdev_priv(netdev);
1231 struct e1000_hw *hw = &adapter->hw;
1232
Jiri Pirko8e586132011-12-08 19:52:37 -05001233 if (hw->mac.ops.set_vfta(hw, vid, false)) {
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001234 dev_err(&adapter->pdev->dev,
1235 "Failed to remove vlan id %d\n", vid);
Jiri Pirko8e586132011-12-08 19:52:37 -05001236 return -EINVAL;
1237 }
1238 clear_bit(vid, adapter->active_vlans);
1239 return 0;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001240}
1241
1242static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
1243{
1244 u16 vid;
1245
Jiri Pirkoa0f1d602011-07-21 06:30:00 +00001246 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001247 igbvf_vlan_rx_add_vid(adapter->netdev, vid);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001248}
1249
/**
 * igbvf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.  The sequence is
 * order-sensitive: disable the queue, wait, program the ring, then
 * re-enable.
 **/
static void igbvf_configure_tx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 txdctl, dca_txctrl;

	/* disable transmits */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	e1e_flush();
	/* give the disable time to take effect before reprogramming */
	msleep(10);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
	tdba = tx_ring->dma;
	ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
	ew32(TDBAH(0), (tdba >> 32));
	ew32(TDH(0), 0);
	ew32(TDT(0), 0);
	tx_ring->head = E1000_TDH(0);
	tx_ring->tail = E1000_TDT(0);

	/* Turn off Relaxed Ordering on head write-backs.  The writebacks
	 * MUST be delivered in order or it will completely screw up
	 * our bookeeping.
	 */
	dca_txctrl = er32(DCA_TXCTRL(0));
	dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
	ew32(DCA_TXCTRL(0), dca_txctrl);

	/* enable transmits */
	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	ew32(TXDCTL(0), txdctl);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;

	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
}
1297
/**
 * igbvf_setup_srrctl - configure the receive control registers
 * @adapter: Board private structure
 *
 * Builds the SRRCTL value for queue 0: drop-enable, packet buffer size,
 * and descriptor type (single-buffer vs. header-split depending on the
 * configured rx_buffer_len).
 **/
static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 srrctl = 0;

	/* no-op on a zero value; kept to document which fields are set below */
	srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
	            E1000_SRRCTL_BSIZEHDR_MASK |
	            E1000_SRRCTL_BSIZEPKT_MASK);

	/* Enable queue drop to avoid head of line blocking */
	srrctl |= E1000_SRRCTL_DROP_EN;

	/* Setup buffer sizes */
	srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
	          E1000_SRRCTL_BSIZEPKT_SHIFT;

	if (adapter->rx_buffer_len < 2048) {
		/* small buffers: one contiguous buffer per descriptor */
		adapter->rx_ps_hdr_size = 0;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	} else {
		/* large buffers: split 128-byte headers from the payload */
		adapter->rx_ps_hdr_size = 128;
		srrctl |= adapter->rx_ps_hdr_size <<
		          E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	}

	ew32(SRRCTL(0), srrctl);
}
1330
1331/**
1332 * igbvf_configure_rx - Configure Receive Unit after Reset
1333 * @adapter: board private structure
1334 *
1335 * Configure the Rx unit of the MAC after a reset.
1336 **/
1337static void igbvf_configure_rx(struct igbvf_adapter *adapter)
1338{
1339 struct e1000_hw *hw = &adapter->hw;
1340 struct igbvf_ring *rx_ring = adapter->rx_ring;
1341 u64 rdba;
1342 u32 rdlen, rxdctl;
1343
1344 /* disable receives */
1345 rxdctl = er32(RXDCTL(0));
1346 ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
Jesse Brandeburg945a5152011-07-20 00:56:21 +00001347 e1e_flush();
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001348 msleep(10);
1349
1350 rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
1351
1352 /*
1353 * Setup the HW Rx Head and Tail Descriptor Pointers and
1354 * the Base and Length of the Rx Descriptor Ring
1355 */
1356 rdba = rx_ring->dma;
Andrew Morton8e20ce92009-06-18 16:49:17 -07001357 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001358 ew32(RDBAH(0), (rdba >> 32));
1359 ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
1360 rx_ring->head = E1000_RDH(0);
1361 rx_ring->tail = E1000_RDT(0);
1362 ew32(RDH(0), 0);
1363 ew32(RDT(0), 0);
1364
1365 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1366 rxdctl &= 0xFFF00000;
1367 rxdctl |= IGBVF_RX_PTHRESH;
1368 rxdctl |= IGBVF_RX_HTHRESH << 8;
1369 rxdctl |= IGBVF_RX_WTHRESH << 16;
1370
1371 igbvf_set_rlpml(adapter);
1372
1373 /* enable receives */
1374 ew32(RXDCTL(0), rxdctl);
1375}
1376
1377/**
1378 * igbvf_set_multi - Multicast and Promiscuous mode set
1379 * @netdev: network interface device structure
1380 *
1381 * The set_multi entry point is called whenever the multicast address
1382 * list or the network interface flags are updated. This routine is
1383 * responsible for configuring the hardware for proper multicast,
1384 * promiscuous mode, and all-multi behavior.
1385 **/
1386static void igbvf_set_multi(struct net_device *netdev)
1387{
1388 struct igbvf_adapter *adapter = netdev_priv(netdev);
1389 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko22bedad32010-04-01 21:22:57 +00001390 struct netdev_hw_addr *ha;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001391 u8 *mta_list = NULL;
1392 int i;
1393
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001394 if (!netdev_mc_empty(netdev)) {
1395 mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001396 if (!mta_list) {
1397 dev_err(&adapter->pdev->dev,
1398 "failed to allocate multicast filter list\n");
1399 return;
1400 }
1401 }
1402
1403 /* prepare a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00001404 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00001405 netdev_for_each_mc_addr(ha, netdev)
1406 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001407
1408 hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
1409 kfree(mta_list);
1410}
1411
/**
 * igbvf_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 *
 * Reapplies filters first (multicast, VLAN), then programs the Tx and
 * Rx units and pre-fills the Rx ring with buffers.
 **/
static void igbvf_configure(struct igbvf_adapter *adapter)
{
	igbvf_set_multi(adapter->netdev);

	igbvf_restore_vlan(adapter);

	igbvf_configure_tx(adapter);
	igbvf_setup_srrctl(adapter);
	igbvf_configure_rx(adapter);
	/* fill every unused Rx descriptor with a fresh buffer */
	igbvf_alloc_rx_buffers(adapter->rx_ring,
	                       igbvf_desc_unused(adapter->rx_ring));
}
1428
1429/* igbvf_reset - bring the hardware into a known good state
1430 *
1431 * This function boots the hardware and enables some settings that
1432 * require a configuration cycle of the hardware - those cannot be
1433 * set/changed during runtime. After reset the device needs to be
1434 * properly configured for Rx, Tx etc.
1435 */
Alexander Duyck2d165772009-04-09 22:49:20 +00001436static void igbvf_reset(struct igbvf_adapter *adapter)
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001437{
1438 struct e1000_mac_info *mac = &adapter->hw.mac;
1439 struct net_device *netdev = adapter->netdev;
1440 struct e1000_hw *hw = &adapter->hw;
1441
1442 /* Allow time for pending master requests to run */
1443 if (mac->ops.reset_hw(hw))
1444 dev_err(&adapter->pdev->dev, "PF still resetting\n");
1445
1446 mac->ops.init_hw(hw);
1447
1448 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1449 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1450 netdev->addr_len);
1451 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1452 netdev->addr_len);
1453 }
Alexander Duyck72279092009-12-11 22:58:14 -08001454
1455 adapter->last_reset = jiffies;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001456}
1457
/**
 * igbvf_up - restore a previously configured adapter to running state
 * @adapter: board private structure
 *
 * Assumes the hardware has just been reset; reprograms it, re-enables
 * NAPI and interrupts, and restarts the watchdog.  Always returns 0.
 **/
int igbvf_up(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	igbvf_configure(adapter);

	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);
	if (adapter->msix_entries)
		igbvf_configure_msix(adapter);

	/* Clear any pending interrupts. */
	er32(EICR);
	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);


	return 0;
}
1482
/**
 * igbvf_down - quiesce the adapter
 * @adapter: board private structure
 *
 * Stops Rx/Tx in hardware, disables NAPI/interrupts/watchdog, snapshots
 * the statistics, then resets the device and drains both rings.  The
 * ordering below is deliberate (stop hardware before software).
 **/
void igbvf_down(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rxdctl, txdctl;

	/*
	 * signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);

	/* flush both disables and wait for them to finish */
	e1e_flush();
	msleep(10);

	napi_disable(&adapter->rx_ring->napi);

	igbvf_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset*/
	igbvf_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	igbvf_reset(adapter);
	igbvf_clean_tx_ring(adapter->tx_ring);
	igbvf_clean_rx_ring(adapter->rx_ring);
}
1527
/**
 * igbvf_reinit_locked - serialized full restart (down + up)
 * @adapter: board private structure
 *
 * The __IGBVF_RESETTING bit acts as the lock: spin (sleeping) until any
 * reset already in progress finishes, then bounce the interface.
 **/
void igbvf_reinit_locked(struct igbvf_adapter *adapter)
{
	might_sleep();
	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		msleep(1);
	igbvf_down(adapter);
	igbvf_up(adapter);
	clear_bit(__IGBVF_RESETTING, &adapter->state);
}
1537
/**
 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
 * @adapter: board private structure to initialize
 *
 * igbvf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	s32 rc;

	/* derive frame/buffer sizes from the current MTU */
	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	adapter->rx_ps_hdr_size = 0;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* default interrupt moderation settings */
	adapter->tx_int_delay = 8;
	adapter->tx_abs_int_delay = 32;
	adapter->rx_int_delay = 0;
	adapter->rx_abs_int_delay = 8;
	adapter->requested_itr = 3;
	adapter->current_itr = IGBVF_START_ITR;

	/* Set various function pointers */
	adapter->ei->init_ops(&adapter->hw);

	rc = adapter->hw.mac.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	igbvf_set_interrupt_capability(adapter);

	if (igbvf_alloc_queues(adapter))
		return -ENOMEM;

	spin_lock_init(&adapter->tx_queue_lock);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igbvf_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	/* device starts administratively down until igbvf_open() */
	set_bit(__IGBVF_DOWN, &adapter->state);
	return 0;
}
1589
/* Snapshot the hardware statistics registers into both the 'last'
 * values (used by UPDATE_VF_COUNTER for 32-bit rollover detection)
 * and the 'base' values.  Each register is intentionally read twice.
 */
static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	adapter->stats.last_gprc = er32(VFGPRC);
	adapter->stats.last_gorc = er32(VFGORC);
	adapter->stats.last_gptc = er32(VFGPTC);
	adapter->stats.last_gotc = er32(VFGOTC);
	adapter->stats.last_mprc = er32(VFMPRC);
	adapter->stats.last_gotlbc = er32(VFGOTLBC);
	adapter->stats.last_gptlbc = er32(VFGPTLBC);
	adapter->stats.last_gorlbc = er32(VFGORLBC);
	adapter->stats.last_gprlbc = er32(VFGPRLBC);

	adapter->stats.base_gprc = er32(VFGPRC);
	adapter->stats.base_gorc = er32(VFGORC);
	adapter->stats.base_gptc = er32(VFGPTC);
	adapter->stats.base_gotc = er32(VFGOTC);
	adapter->stats.base_mprc = er32(VFMPRC);
	adapter->stats.base_gotlbc = er32(VFGOTLBC);
	adapter->stats.base_gptlbc = er32(VFGPTLBC);
	adapter->stats.base_gorlbc = er32(VFGORLBC);
	adapter->stats.base_gprlbc = er32(VFGPRLBC);
}
1614
/**
 * igbvf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igbvf_open(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IGBVF_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;

	/*
	 * before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	igbvf_configure(adapter);

	err = igbvf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igbvf_up() */
	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);

	/* clear any pending interrupts */
	er32(EICR);

	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;

	/* unwind in reverse order of acquisition */
err_req_irq:
	igbvf_free_rx_resources(adapter->rx_ring);
err_setup_rx:
	igbvf_free_tx_resources(adapter->tx_ring);
err_setup_tx:
	igbvf_reset(adapter);

	return err;
}
1684
1685/**
1686 * igbvf_close - Disables a network interface
1687 * @netdev: network interface device structure
1688 *
1689 * Returns 0, this is not allowed to fail
1690 *
1691 * The close entry point is called when an interface is de-activated
1692 * by the OS. The hardware is still under the drivers control, but
1693 * needs to be disabled. A global MAC reset is issued to stop the
1694 * hardware, and all transmit and receive resources are freed.
1695 **/
1696static int igbvf_close(struct net_device *netdev)
1697{
1698 struct igbvf_adapter *adapter = netdev_priv(netdev);
1699
1700 WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
1701 igbvf_down(adapter);
1702
1703 igbvf_free_irq(adapter);
1704
1705 igbvf_free_tx_resources(adapter->tx_ring);
1706 igbvf_free_rx_resources(adapter->rx_ring);
1707
1708 return 0;
1709}
1710/**
1711 * igbvf_set_mac - Change the Ethernet Address of the NIC
1712 * @netdev: network interface device structure
1713 * @p: pointer to an address structure
1714 *
1715 * Returns 0 on success, negative on failure
1716 **/
1717static int igbvf_set_mac(struct net_device *netdev, void *p)
1718{
1719 struct igbvf_adapter *adapter = netdev_priv(netdev);
1720 struct e1000_hw *hw = &adapter->hw;
1721 struct sockaddr *addr = p;
1722
1723 if (!is_valid_ether_addr(addr->sa_data))
1724 return -EADDRNOTAVAIL;
1725
1726 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
1727
1728 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
1729
1730 if (memcmp(addr->sa_data, hw->mac.addr, 6))
1731 return -EADDRNOTAVAIL;
1732
1733 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Danny Kukawka067fb4c2012-02-17 05:43:26 +00001734 netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001735
1736 return 0;
1737}
1738
/*
 * Fold a 32-bit hardware counter into its 64-bit software shadow.
 * When the current reading is below the previous one the hardware
 * counter wrapped, so carry 2^32 into the software total; then splice
 * the fresh low 32 bits into the accumulated value.  Relies on a local
 * 'adapter' and the er32() macro at the expansion site.
 */
#define UPDATE_VF_COUNTER(reg, name) \
	{ \
		u32 current_counter = er32(reg); \
		if (current_counter < adapter->stats.last_##name) \
			adapter->stats.name += 0x100000000LL; \
		adapter->stats.last_##name = current_counter; \
		adapter->stats.name &= 0xFFFFFFFF00000000LL; \
		adapter->stats.name |= current_counter; \
	}
1748
1749/**
1750 * igbvf_update_stats - Update the board statistics counters
1751 * @adapter: board private structure
1752**/
1753void igbvf_update_stats(struct igbvf_adapter *adapter)
1754{
1755 struct e1000_hw *hw = &adapter->hw;
1756 struct pci_dev *pdev = adapter->pdev;
1757
1758 /*
1759 * Prevent stats update while adapter is being reset, link is down
1760 * or if the pci connection is down.
1761 */
1762 if (adapter->link_speed == 0)
1763 return;
1764
1765 if (test_bit(__IGBVF_RESETTING, &adapter->state))
1766 return;
1767
1768 if (pci_channel_offline(pdev))
1769 return;
1770
1771 UPDATE_VF_COUNTER(VFGPRC, gprc);
1772 UPDATE_VF_COUNTER(VFGORC, gorc);
1773 UPDATE_VF_COUNTER(VFGPTC, gptc);
1774 UPDATE_VF_COUNTER(VFGOTC, gotc);
1775 UPDATE_VF_COUNTER(VFMPRC, mprc);
1776 UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
1777 UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
1778 UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
1779 UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);
1780
1781 /* Fill out the OS statistics structure */
1782 adapter->net_stats.multicast = adapter->stats.mprc;
1783}
1784
1785static void igbvf_print_link_info(struct igbvf_adapter *adapter)
1786{
Jeff Kirshera4ba8cb2011-10-21 19:42:26 +00001787 dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
1788 adapter->link_speed,
1789 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001790}
1791
1792static bool igbvf_has_link(struct igbvf_adapter *adapter)
1793{
1794 struct e1000_hw *hw = &adapter->hw;
1795 s32 ret_val = E1000_SUCCESS;
1796 bool link_active;
1797
Alexander Duyck72279092009-12-11 22:58:14 -08001798 /* If interface is down, stay link down */
1799 if (test_bit(__IGBVF_DOWN, &adapter->state))
1800 return false;
1801
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001802 ret_val = hw->mac.ops.check_for_link(hw);
1803 link_active = !hw->mac.get_link_status;
1804
1805 /* if check for link returns error we will need to reset */
Alexander Duyck72279092009-12-11 22:58:14 -08001806 if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00001807 schedule_work(&adapter->reset_task);
1808
1809 return link_active;
1810}
1811
1812/**
1813 * igbvf_watchdog - Timer Call-back
1814 * @data: pointer to adapter cast into an unsigned long
1815 **/
1816static void igbvf_watchdog(unsigned long data)
1817{
1818 struct igbvf_adapter *adapter = (struct igbvf_adapter *) data;
1819
1820 /* Do the rest outside of interrupt context */
1821 schedule_work(&adapter->watchdog_task);
1822}
1823
/* Periodic (2s) link supervision: mirrors the PF-reported link state
 * into netdev carrier/queue state, updates statistics while the link is
 * up, and schedules a reset if Tx work is stranded with the link down.
 */
static void igbvf_watchdog_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter = container_of(work,
	                                             struct igbvf_adapter,
	                                             watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link;
	int tx_pending = 0;

	link = igbvf_has_link(adapter);

	if (link) {
		/* link just came up: fetch speed/duplex and open the queue */
		if (!netif_carrier_ok(netdev)) {
			mac->ops.get_link_up_info(&adapter->hw,
			                          &adapter->link_speed,
			                          &adapter->link_duplex);
			igbvf_print_link_info(adapter);

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		/* link just went down: stop the queue and report */
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			dev_info(&adapter->pdev->dev, "Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	if (netif_carrier_ok(netdev)) {
		igbvf_update_stats(adapter);
	} else {
		/* any descriptor still in use means Tx work is stranded */
		tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
		              tx_ring->count);
		if (tx_pending) {
			/*
			 * We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	ew32(EICS, adapter->rx_ring->eims_value);

	/* Reset the timer */
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
		          round_jiffies(jiffies + (2 * HZ)));
}
1883
1884#define IGBVF_TX_FLAGS_CSUM 0x00000001
1885#define IGBVF_TX_FLAGS_VLAN 0x00000002
1886#define IGBVF_TX_FLAGS_TSO 0x00000004
1887#define IGBVF_TX_FLAGS_IPV4 0x00000008
1888#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000
1889#define IGBVF_TX_FLAGS_VLAN_SHIFT 16
1890
/*
 * igbvf_tso - set up a TSO (TCP segmentation offload) context descriptor
 * @adapter: board private structure
 * @tx_ring: ring the context descriptor is written to
 * @skb: the GSO skb being transmitted
 * @tx_flags: IGBVF_TX_FLAGS_* for this packet (VLAN tag in upper bits)
 * @hdr_len: out - total L2+L3+L4 header length the hardware replicates
 *
 * Writes one advanced context descriptor describing header layout, MSS
 * and pseudo-header checksum seed.  Returns true (1) on success or a
 * negative errno if un-cloning the header fails; callers only test for
 * a negative value.
 */
static int igbvf_tso(struct igbvf_adapter *adapter,
                     struct igbvf_ring *tx_ring,
                     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;
	*hdr_len = 0;

	/* headers are rewritten below, so we need a private copy */
	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err) {
			dev_err(&adapter->pdev->dev,
			        "igbvf_tso returning an error\n");
			return err;
		}
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	/* seed the TCP checksum with the pseudo-header; hardware fills in
	 * lengths per segment, so tot_len/payload_len are zeroed here
	 */
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
		                                         iph->daddr, 0,
		                                         IPPROTO_TCP,
		                                         0);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
		                                       &ipv6_hdr(skb)->daddr,
		                                       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	/* low bits carry the IP header length */
	info |= (skb_transport_header(skb) - skb_network_header(skb));
	*hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	/* context descriptor carries no data buffer, so dma stays 0 */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
1970
/*
 * igbvf_tx_csum - set up a checksum-offload context descriptor
 * @adapter: board private structure
 * @tx_ring: ring the context descriptor is written to
 * @skb: packet being transmitted
 * @tx_flags: IGBVF_TX_FLAGS_* for this packet
 *
 * Emits a context descriptor when the skb needs L4 checksum insertion
 * or carries a VLAN tag; returns true if a descriptor was consumed,
 * false otherwise.
 */
static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
                                 struct igbvf_ring *tx_ring,
                                 struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGBVF_TX_FLAGS_VLAN)
			info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		/* IP header length goes in the low bits */
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= (skb_transport_header(skb) -
			         skb_network_header(skb));


		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		/* tell hardware which L4 protocol to checksum (TCP only;
		 * other protocols fall through with no L4T bits set)
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		context_desc->mss_l4len_idx = 0;

		/* context descriptor has no data buffer */
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
2032
/*
 * igbvf_maybe_stop_tx - stop the Tx queue if descriptors are scarce
 * @netdev: network interface device structure
 * @size: number of descriptors the caller is about to consume
 *
 * Returns 0 if there is room, -EBUSY if the queue had to stay stopped.
 */
static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* if there are enough descriptors then we don't need to worry */
	if (igbvf_desc_unused(adapter->tx_ring) >= size)
		return 0;

	netif_stop_queue(netdev);

	/* order the stop against the cleanup path's free-count update */
	smp_mb();

	/* We need to check again just in case room has been made available */
	if (igbvf_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	/* cleanup freed enough descriptors while we were stopping */
	netif_wake_queue(netdev);

	++adapter->restart_queue;
	return 0;
}
2054
2055#define IGBVF_MAX_TXD_PWR 16
2056#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
2057
/*
 * igbvf_tx_map_adv - DMA-map an skb's head and fragments for transmit
 * @adapter: board private structure
 * @tx_ring: ring whose buffer_info entries are populated
 * @skb: packet to map
 * @first: index of the first descriptor used for this packet
 *
 * Returns the number of descriptors consumed (>= 1), or 0 if any DMA
 * mapping failed, in which case all mappings made so far are unwound.
 */
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
                                   struct igbvf_ring *tx_ring,
                                   struct sk_buff *skb,
                                   unsigned int first)
{
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	/* map the linear (head) portion first */
	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->mapped_as_page = false;
	buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
	                                  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, buffer_info->dma))
		goto dma_error;


	/* then one descriptor per paged fragment */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		const struct skb_frag_struct *frag;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
		                                    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
	}

	/* the skb is owned by the last descriptor of the packet */
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return ++count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;
	if (count)
		count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		/* walk the ring backwards, wrapping below index 0 */
		if (i==0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	return 0;
}
2135
/*
 * igbvf_tx_queue_adv - write data descriptors and kick the hardware
 * @adapter: board private structure
 * @tx_ring: ring to place the descriptors on
 * @tx_flags: IGBVF_TX_FLAGS_* describing offloads for this packet
 * @count: number of buffer_info entries mapped by igbvf_tx_map_adv()
 * @paylen: total packet length (skb->len)
 * @hdr_len: header length hardware replicates per TSO segment
 *
 * Builds one advanced data descriptor per mapped buffer, marks the last
 * one with the end-of-packet command bits, then advances the tail
 * register to hand the descriptors to hardware.
 */
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
                                      struct igbvf_ring *tx_ring,
                                      int tx_flags, int count, u32 paylen,
                                      u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igbvf_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
	                E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGBVF_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGBVF_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	/* hardware wants the payload length excluding replicated headers */
	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
		         cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	/* final descriptor gets the EOP/RS command bits (adapter->txd_cmd) */
	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it syncronizes IO on IA64/Altix systems */
	mmiowb();
}
2194
Stephen Hemminger3b29a562009-08-31 19:50:55 +00002195static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2196 struct net_device *netdev,
2197 struct igbvf_ring *tx_ring)
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002198{
2199 struct igbvf_adapter *adapter = netdev_priv(netdev);
2200 unsigned int first, tx_flags = 0;
2201 u8 hdr_len = 0;
2202 int count = 0;
2203 int tso = 0;
2204
2205 if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2206 dev_kfree_skb_any(skb);
2207 return NETDEV_TX_OK;
2208 }
2209
2210 if (skb->len <= 0) {
2211 dev_kfree_skb_any(skb);
2212 return NETDEV_TX_OK;
2213 }
2214
2215 /*
2216 * need: count + 4 desc gap to keep tail from touching
2217 * + 2 desc gap to keep tail from touching head,
2218 * + 1 desc for skb->data,
2219 * + 1 desc for context descriptor,
2220 * head, otherwise try next time
2221 */
2222 if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
2223 /* this is a hard error */
2224 return NETDEV_TX_BUSY;
2225 }
2226
Jiri Pirkoa0f1d602011-07-21 06:30:00 +00002227 if (vlan_tx_tag_present(skb)) {
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002228 tx_flags |= IGBVF_TX_FLAGS_VLAN;
2229 tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
2230 }
2231
2232 if (skb->protocol == htons(ETH_P_IP))
2233 tx_flags |= IGBVF_TX_FLAGS_IPV4;
2234
2235 first = tx_ring->next_to_use;
2236
2237 tso = skb_is_gso(skb) ?
2238 igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
2239 if (unlikely(tso < 0)) {
2240 dev_kfree_skb_any(skb);
2241 return NETDEV_TX_OK;
2242 }
2243
2244 if (tso)
2245 tx_flags |= IGBVF_TX_FLAGS_TSO;
2246 else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
2247 (skb->ip_summed == CHECKSUM_PARTIAL))
2248 tx_flags |= IGBVF_TX_FLAGS_CSUM;
2249
2250 /*
2251 * count reflects descriptors mapped, if 0 then mapping error
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002252 * has occurred and we need to rewind the descriptor queue
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002253 */
2254 count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
2255
2256 if (count) {
2257 igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
2258 skb->len, hdr_len);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002259 /* Make sure there is space in the ring for the next send. */
2260 igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
2261 } else {
2262 dev_kfree_skb_any(skb);
2263 tx_ring->buffer_info[first].time_stamp = 0;
2264 tx_ring->next_to_use = first;
2265 }
2266
2267 return NETDEV_TX_OK;
2268}
2269
Stephen Hemminger3b29a562009-08-31 19:50:55 +00002270static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
2271 struct net_device *netdev)
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002272{
2273 struct igbvf_adapter *adapter = netdev_priv(netdev);
2274 struct igbvf_ring *tx_ring;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002275
2276 if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2277 dev_kfree_skb_any(skb);
2278 return NETDEV_TX_OK;
2279 }
2280
2281 tx_ring = &adapter->tx_ring[0];
2282
Stephen Hemminger3b29a562009-08-31 19:50:55 +00002283 return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002284}
2285
2286/**
2287 * igbvf_tx_timeout - Respond to a Tx Hang
2288 * @netdev: network interface device structure
2289 **/
2290static void igbvf_tx_timeout(struct net_device *netdev)
2291{
2292 struct igbvf_adapter *adapter = netdev_priv(netdev);
2293
2294 /* Do the reset outside of interrupt context */
2295 adapter->tx_timeout_count++;
2296 schedule_work(&adapter->reset_task);
2297}
2298
2299static void igbvf_reset_task(struct work_struct *work)
2300{
2301 struct igbvf_adapter *adapter;
2302 adapter = container_of(work, struct igbvf_adapter, reset_task);
2303
2304 igbvf_reinit_locked(adapter);
2305}
2306
2307/**
2308 * igbvf_get_stats - Get System Network Statistics
2309 * @netdev: network interface device structure
2310 *
2311 * Returns the address of the device statistics structure.
2312 * The statistics are actually updated from the timer callback.
2313 **/
2314static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
2315{
2316 struct igbvf_adapter *adapter = netdev_priv(netdev);
2317
2318 /* only return the current stats */
2319 return &adapter->net_stats;
2320}
2321
2322/**
2323 * igbvf_change_mtu - Change the Maximum Transfer Unit
2324 * @netdev: network interface device structure
2325 * @new_mtu: new value for maximum frame size
2326 *
2327 * Returns 0 on success, negative on failure
2328 **/
2329static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
2330{
2331 struct igbvf_adapter *adapter = netdev_priv(netdev);
2332 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2333
2334 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
2335 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
2336 return -EINVAL;
2337 }
2338
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002339#define MAX_STD_JUMBO_FRAME_SIZE 9234
2340 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
2341 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
2342 return -EINVAL;
2343 }
2344
2345 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
2346 msleep(1);
2347 /* igbvf_down has a dependency on max_frame_size */
2348 adapter->max_frame_size = max_frame;
2349 if (netif_running(netdev))
2350 igbvf_down(adapter);
2351
2352 /*
2353 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
2354 * means we reserve 2 more, this pushes us to allocate from the next
2355 * larger slab size.
2356 * i.e. RXBUFFER_2048 --> size-4096 slab
2357 * However with the new *_jumbo_rx* routines, jumbo receives will use
2358 * fragmented skbs
2359 */
2360
2361 if (max_frame <= 1024)
2362 adapter->rx_buffer_len = 1024;
2363 else if (max_frame <= 2048)
2364 adapter->rx_buffer_len = 2048;
2365 else
2366#if (PAGE_SIZE / 2) > 16384
2367 adapter->rx_buffer_len = 16384;
2368#else
2369 adapter->rx_buffer_len = PAGE_SIZE / 2;
2370#endif
2371
2372
2373 /* adjust allocation if LPE protects us, and we aren't using SBP */
2374 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
2375 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
2376 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
2377 ETH_FCS_LEN;
2378
2379 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
2380 netdev->mtu, new_mtu);
2381 netdev->mtu = new_mtu;
2382
2383 if (netif_running(netdev))
2384 igbvf_up(adapter);
2385 else
2386 igbvf_reset(adapter);
2387
2388 clear_bit(__IGBVF_RESETTING, &adapter->state);
2389
2390 return 0;
2391}
2392
2393static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2394{
2395 switch (cmd) {
2396 default:
2397 return -EOPNOTSUPP;
2398 }
2399}
2400
/*
 * igbvf_suspend - quiesce the device for suspend or shutdown
 * @pdev: PCI device information struct
 * @state: target power state (unused here beyond the PM contract)
 *
 * Detaches the netdev, tears down the interface if running, saves PCI
 * config state (PM builds only) and disables the PCI device.
 */
static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
		igbvf_down(adapter);
		igbvf_free_irq(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	pci_disable_device(pdev);

	return 0;
}
2427
2428#ifdef CONFIG_PM
/*
 * igbvf_resume - restore the device after suspend
 * @pdev: PCI device information struct
 *
 * Re-enables the PCI device, restores config state, re-requests the
 * IRQs if the interface was running, resets the hardware and brings
 * the interface back up.
 */
static int igbvf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		err = igbvf_request_irq(adapter);
		if (err)
			return err;
	}

	igbvf_reset(adapter);

	if (netif_running(netdev))
		igbvf_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
2459#endif
2460
/* PCI shutdown hook: reuse the suspend path to quiesce the device. */
static void igbvf_shutdown(struct pci_dev *pdev)
{
	igbvf_suspend(pdev, PMSG_SUSPEND);
}
2465
2466#ifdef CONFIG_NET_POLL_CONTROLLER
2467/*
2468 * Polling 'interrupt' - used by things like netconsole to send skbs
2469 * without having to re-enable interrupts. It's not called while
2470 * the interrupt routine is executing.
2471 */
2472static void igbvf_netpoll(struct net_device *netdev)
2473{
2474 struct igbvf_adapter *adapter = netdev_priv(netdev);
2475
2476 disable_irq(adapter->pdev->irq);
2477
2478 igbvf_clean_tx_irq(adapter->tx_ring);
2479
2480 enable_irq(adapter->pdev->irq);
2481}
2482#endif
2483
2484/**
2485 * igbvf_io_error_detected - called when PCI error is detected
2486 * @pdev: Pointer to PCI device
2487 * @state: The current pci connection state
2488 *
2489 * This function is called after a PCI bus error affecting
2490 * this device has been detected.
2491 */
2492static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
2493 pci_channel_state_t state)
2494{
2495 struct net_device *netdev = pci_get_drvdata(pdev);
2496 struct igbvf_adapter *adapter = netdev_priv(netdev);
2497
2498 netif_device_detach(netdev);
2499
Dean Nelsonc06c4302009-07-31 09:13:33 +00002500 if (state == pci_channel_io_perm_failure)
2501 return PCI_ERS_RESULT_DISCONNECT;
2502
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002503 if (netif_running(netdev))
2504 igbvf_down(adapter);
2505 pci_disable_device(pdev);
2506
2507 /* Request a slot slot reset. */
2508 return PCI_ERS_RESULT_NEED_RESET;
2509}
2510
2511/**
2512 * igbvf_io_slot_reset - called after the pci bus has been reset.
2513 * @pdev: Pointer to PCI device
2514 *
2515 * Restart the card from scratch, as if from a cold-boot. Implementation
2516 * resembles the first-half of the igbvf_resume routine.
2517 */
2518static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
2519{
2520 struct net_device *netdev = pci_get_drvdata(pdev);
2521 struct igbvf_adapter *adapter = netdev_priv(netdev);
2522
2523 if (pci_enable_device_mem(pdev)) {
2524 dev_err(&pdev->dev,
2525 "Cannot re-enable PCI device after reset.\n");
2526 return PCI_ERS_RESULT_DISCONNECT;
2527 }
2528 pci_set_master(pdev);
2529
2530 igbvf_reset(adapter);
2531
2532 return PCI_ERS_RESULT_RECOVERED;
2533}
2534
2535/**
2536 * igbvf_io_resume - called when traffic can start flowing again.
2537 * @pdev: Pointer to PCI device
2538 *
2539 * This callback is called when the error recovery driver tells us that
2540 * its OK to resume normal operation. Implementation resembles the
2541 * second-half of the igbvf_resume routine.
2542 */
2543static void igbvf_io_resume(struct pci_dev *pdev)
2544{
2545 struct net_device *netdev = pci_get_drvdata(pdev);
2546 struct igbvf_adapter *adapter = netdev_priv(netdev);
2547
2548 if (netif_running(netdev)) {
2549 if (igbvf_up(adapter)) {
2550 dev_err(&pdev->dev,
2551 "can't bring device back up after reset\n");
2552 return;
2553 }
2554 }
2555
2556 netif_device_attach(netdev);
2557}
2558
2559static void igbvf_print_device_info(struct igbvf_adapter *adapter)
2560{
2561 struct e1000_hw *hw = &adapter->hw;
2562 struct net_device *netdev = adapter->netdev;
2563 struct pci_dev *pdev = adapter->pdev;
2564
Williams, Mitch A10090752011-10-18 06:39:37 +00002565 if (hw->mac.type == e1000_vfadapt_i350)
2566 dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n");
2567 else
2568 dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
H Hartley Sweeten753cdc32009-12-29 20:02:29 -08002569 dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002570}
2571
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002572static int igbvf_set_features(struct net_device *netdev,
2573 netdev_features_t features)
Michał Mirosławfd38f7342011-08-30 17:07:11 +00002574{
2575 struct igbvf_adapter *adapter = netdev_priv(netdev);
2576
2577 if (features & NETIF_F_RXCSUM)
2578 adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
2579 else
2580 adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;
2581
2582 return 0;
2583}
2584
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002585static const struct net_device_ops igbvf_netdev_ops = {
2586 .ndo_open = igbvf_open,
2587 .ndo_stop = igbvf_close,
2588 .ndo_start_xmit = igbvf_xmit_frame,
2589 .ndo_get_stats = igbvf_get_stats,
Jiri Pirkoafc4b132011-08-16 06:29:01 +00002590 .ndo_set_rx_mode = igbvf_set_multi,
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002591 .ndo_set_mac_address = igbvf_set_mac,
2592 .ndo_change_mtu = igbvf_change_mtu,
2593 .ndo_do_ioctl = igbvf_ioctl,
2594 .ndo_tx_timeout = igbvf_tx_timeout,
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002595 .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid,
2596 .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid,
2597#ifdef CONFIG_NET_POLL_CONTROLLER
2598 .ndo_poll_controller = igbvf_netpoll,
2599#endif
Michał Mirosławfd38f7342011-08-30 17:07:11 +00002600 .ndo_set_features = igbvf_set_features,
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002601};
2602
2603/**
2604 * igbvf_probe - Device Initialization Routine
2605 * @pdev: PCI device information struct
2606 * @ent: entry in igbvf_pci_tbl
2607 *
2608 * Returns 0 on success, negative on failure
2609 *
2610 * igbvf_probe initializes an adapter identified by a pci_dev structure.
2611 * The OS initialization, configuring of the adapter private structure,
2612 * and a hardware reset occur.
2613 **/
2614static int __devinit igbvf_probe(struct pci_dev *pdev,
2615 const struct pci_device_id *ent)
2616{
2617 struct net_device *netdev;
2618 struct igbvf_adapter *adapter;
2619 struct e1000_hw *hw;
2620 const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
2621
2622 static int cards_found;
2623 int err, pci_using_dac;
2624
2625 err = pci_enable_device_mem(pdev);
2626 if (err)
2627 return err;
2628
2629 pci_using_dac = 0;
Nick Nunley123e9f12010-04-27 13:09:44 +00002630 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002631 if (!err) {
Nick Nunley123e9f12010-04-27 13:09:44 +00002632 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002633 if (!err)
2634 pci_using_dac = 1;
2635 } else {
Nick Nunley123e9f12010-04-27 13:09:44 +00002636 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002637 if (err) {
Nick Nunley123e9f12010-04-27 13:09:44 +00002638 err = dma_set_coherent_mask(&pdev->dev,
2639 DMA_BIT_MASK(32));
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002640 if (err) {
2641 dev_err(&pdev->dev, "No usable DMA "
2642 "configuration, aborting\n");
2643 goto err_dma;
2644 }
2645 }
2646 }
2647
2648 err = pci_request_regions(pdev, igbvf_driver_name);
2649 if (err)
2650 goto err_pci_reg;
2651
2652 pci_set_master(pdev);
2653
2654 err = -ENOMEM;
2655 netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
2656 if (!netdev)
2657 goto err_alloc_etherdev;
2658
2659 SET_NETDEV_DEV(netdev, &pdev->dev);
2660
2661 pci_set_drvdata(pdev, netdev);
2662 adapter = netdev_priv(netdev);
2663 hw = &adapter->hw;
2664 adapter->netdev = netdev;
2665 adapter->pdev = pdev;
2666 adapter->ei = ei;
2667 adapter->pba = ei->pba;
2668 adapter->flags = ei->flags;
2669 adapter->hw.back = adapter;
2670 adapter->hw.mac.type = ei->mac;
stephen hemmingerb3f4d592012-03-13 06:04:20 +00002671 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002672
2673 /* PCI config space info */
2674
2675 hw->vendor_id = pdev->vendor;
2676 hw->device_id = pdev->device;
2677 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2678 hw->subsystem_device_id = pdev->subsystem_device;
Sergei Shtylyovff938e42011-02-28 11:57:33 -08002679 hw->revision_id = pdev->revision;
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002680
2681 err = -EIO;
2682 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
2683 pci_resource_len(pdev, 0));
2684
2685 if (!adapter->hw.hw_addr)
2686 goto err_ioremap;
2687
2688 if (ei->get_variants) {
2689 err = ei->get_variants(adapter);
2690 if (err)
2691 goto err_ioremap;
2692 }
2693
2694 /* setup adapter struct */
2695 err = igbvf_sw_init(adapter);
2696 if (err)
2697 goto err_sw_init;
2698
2699 /* construct the net_device struct */
2700 netdev->netdev_ops = &igbvf_netdev_ops;
2701
2702 igbvf_set_ethtool_ops(netdev);
2703 netdev->watchdog_timeo = 5 * HZ;
2704 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2705
2706 adapter->bd_number = cards_found++;
2707
Michał Mirosławfd38f7342011-08-30 17:07:11 +00002708 netdev->hw_features = NETIF_F_SG |
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002709 NETIF_F_IP_CSUM |
Michał Mirosławfd38f7342011-08-30 17:07:11 +00002710 NETIF_F_IPV6_CSUM |
2711 NETIF_F_TSO |
2712 NETIF_F_TSO6 |
2713 NETIF_F_RXCSUM;
2714
2715 netdev->features = netdev->hw_features |
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002716 NETIF_F_HW_VLAN_TX |
2717 NETIF_F_HW_VLAN_RX |
2718 NETIF_F_HW_VLAN_FILTER;
2719
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002720 if (pci_using_dac)
2721 netdev->features |= NETIF_F_HIGHDMA;
2722
2723 netdev->vlan_features |= NETIF_F_TSO;
2724 netdev->vlan_features |= NETIF_F_TSO6;
2725 netdev->vlan_features |= NETIF_F_IP_CSUM;
2726 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
2727 netdev->vlan_features |= NETIF_F_SG;
2728
2729 /*reset the controller to put the device in a known good state */
2730 err = hw->mac.ops.reset_hw(hw);
2731 if (err) {
2732 dev_info(&pdev->dev,
Williams, Mitch A1242b6f2009-12-23 13:22:43 +00002733 "PF still in reset state, assigning new address."
2734 " Is the PF interface up?\n");
Danny Kukawka1a0d6ae2012-02-09 09:48:54 +00002735 eth_hw_addr_random(netdev);
2736 memcpy(adapter->hw.mac.addr, netdev->dev_addr,
2737 netdev->addr_len);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002738 } else {
2739 err = hw->mac.ops.read_mac_addr(hw);
2740 if (err) {
2741 dev_err(&pdev->dev, "Error reading MAC address\n");
2742 goto err_hw_init;
2743 }
Danny Kukawka1a0d6ae2012-02-09 09:48:54 +00002744 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
2745 netdev->addr_len);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002746 }
2747
Samuel Liao9bd1be452012-04-27 17:09:27 +00002748 if (!is_valid_ether_addr(netdev->dev_addr)) {
H Hartley Sweeten753cdc32009-12-29 20:02:29 -08002749 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
2750 netdev->dev_addr);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002751 err = -EIO;
2752 goto err_hw_init;
2753 }
2754
Samuel Liao9bd1be452012-04-27 17:09:27 +00002755 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
Danny Kukawka1a0d6ae2012-02-09 09:48:54 +00002756
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002757 setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
2758 (unsigned long) adapter);
2759
2760 INIT_WORK(&adapter->reset_task, igbvf_reset_task);
2761 INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);
2762
2763 /* ring size defaults */
2764 adapter->rx_ring->count = 1024;
2765 adapter->tx_ring->count = 1024;
2766
2767 /* reset the hardware with the new settings */
2768 igbvf_reset(adapter);
2769
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002770 strcpy(netdev->name, "eth%d");
2771 err = register_netdev(netdev);
2772 if (err)
2773 goto err_hw_init;
2774
Emil Tantilovde7fe782010-10-28 00:59:51 +00002775 /* tell the stack to leave us alone until igbvf_open() is called */
2776 netif_carrier_off(netdev);
2777 netif_stop_queue(netdev);
2778
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002779 igbvf_print_device_info(adapter);
2780
2781 igbvf_initialize_last_counter_stats(adapter);
2782
2783 return 0;
2784
2785err_hw_init:
2786 kfree(adapter->tx_ring);
2787 kfree(adapter->rx_ring);
2788err_sw_init:
2789 igbvf_reset_interrupt_capability(adapter);
2790 iounmap(adapter->hw.hw_addr);
2791err_ioremap:
2792 free_netdev(netdev);
2793err_alloc_etherdev:
2794 pci_release_regions(pdev);
2795err_pci_reg:
2796err_dma:
2797 pci_disable_device(pdev);
2798 return err;
2799}
2800
/**
 * igbvf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igbvf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igbvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * The watchdog timer may be rescheduled, so explicitly
	 * disable it from being rescheduled.  Setting __IGBVF_DOWN
	 * first stops the watchdog work item from re-arming the timer
	 * after del_timer_sync() has run.
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	/* wait for any in-flight reset/watchdog work before teardown */
	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	unregister_netdev(netdev);

	/* release MSI-X/MSI vectors acquired at probe time */
	igbvf_reset_interrupt_capability(adapter);

	/*
	 * it is important to delete the napi struct prior to freeing the
	 * rx ring so that you do not end up with null pointer refs
	 */
	netif_napi_del(&adapter->rx_ring->napi);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	/* unmap register BAR; flash BAR is only mapped on some parts */
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
2847
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers igbvf_err_handler = {
	.error_detected = igbvf_io_error_detected,	/* PCI bus error noticed */
	.slot_reset = igbvf_io_slot_reset,		/* slot has been reset */
	.resume = igbvf_io_resume,			/* traffic can restart */
};
2854
/* PCI device IDs this VF driver binds to */
static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
2861
/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
	.name     = igbvf_driver_name,
	.id_table = igbvf_pci_tbl,
	.probe    = igbvf_probe,
	.remove   = __devexit_p(igbvf_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igbvf_suspend,
	.resume   = igbvf_resume,
#endif
	.shutdown = igbvf_shutdown,
	.err_handler = &igbvf_err_handler
};
2876
2877/**
2878 * igbvf_init_module - Driver Registration Routine
2879 *
2880 * igbvf_init_module is the first routine called when the driver is
2881 * loaded. All it does is register with the PCI subsystem.
2882 **/
2883static int __init igbvf_init_module(void)
2884{
2885 int ret;
Jeff Kirshera4ba8cb2011-10-21 19:42:26 +00002886 pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
2887 pr_info("%s\n", igbvf_copyright);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002888
2889 ret = pci_register_driver(&igbvf_driver);
Alexander Duyckd4e0fe02009-04-07 14:37:34 +00002890
2891 return ret;
2892}
2893module_init(igbvf_init_module);
2894
/**
 * igbvf_exit_module - Driver Exit Cleanup Routine
 *
 * igbvf_exit_module is called just before the driver is removed
 * from memory.  Unregistering the PCI driver triggers igbvf_remove()
 * for every bound device.
 **/
static void __exit igbvf_exit_module(void)
{
	pci_unregister_driver(&igbvf_driver);
}
module_exit(igbvf_exit_module);
2906
2907
/* Module metadata exposed via modinfo */
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* netdev.c */