/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/


/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.7.12-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
	 board_82599_vf},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
	 board_X540_vf},

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* forward decls */
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

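/*
 * Worked example for the IVAR indexing above (illustrative only; the queue
 * and vector numbers are assumed, not taken from a real configuration):
 * each VTIVAR register holds four 8-bit entries, two queues per register
 * with an Rx and a Tx slot each.  Mapping the Rx cause (direction 0) of
 * queue 3 to MSI-X vector 2 gives
 *
 *   index = 16 * (3 & 1) + 8 * 0 = 16
 *
 * so bits 23:16 of VTIVAR(3 >> 1) = VTIVAR(1) are cleared and then set to
 * (2 | IXGBE_IVAR_ALLOC_VAL); the Tx cause of the same queue would land in
 * bits 31:24 (index 24).
 */
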
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

static void ixgbevf_tx_timeout(struct net_device *netdev);

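/*
 * Rough illustration of the worst-case accounting above: with
 * IXGBE_MAX_DATA_PER_TXD = 1 << 14 = 16384 bytes, a single 60 KB buffer
 * (an assumed example size) would need
 *
 *   TXD_USE_COUNT(60 * 1024) = DIV_ROUND_UP(61440, 16384) = 4
 *
 * descriptors.  DESC_NEEDED reserves roughly one descriptor per possible
 * fragment plus headroom for context descriptors, and is what
 * ixgbevf_clean_tx_irq() compares against (via TX_WAKE_THRESHOLD) before
 * waking a stopped queue.
 */
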
/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		/* eop could change between read and DD-check */
		if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
			goto cont_loop;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(tx_ring,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

cont_loop:
		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	return count < tx_ring->count;
}

/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @ring: rx ring the packet arrived on
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
				       struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);

		if (!bi->skb) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}
			bi->skb = skb;

			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, bi->dma)) {
				dev_kfree_skb(skb);
				bi->skb = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				break;
			}
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			IXGBE_CB(skb->next)->prev = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* we should not be chaining buffers, if we did drop the skb */
		if (IXGBE_CB(skb)->prev) {
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
		    !(compare_ether_addr(adapter->netdev->dev_addr,
					 eth_hdr(skb)->h_source))) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return !!budget;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function cleans one or more rings associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
						       per_ring_budget);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttle rate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}

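/*
 * Worked example for the smoothing step in ixgbevf_set_itr() (assuming the
 * register-unit values IXGBE_8K_ITR = 500 and IXGBE_20K_ITR = 200 from
 * ixgbevf.h; illustration only, not measured data): with q_vector->itr
 * currently 500 and a new target of 200,
 *
 *   new_itr = (10 * 200 * 500) / ((9 * 200) + 500) = 1000000 / 2300 = 434
 *
 * so the throttle rate moves only part of the way toward the target on each
 * invocation instead of jumping straight to the new interrupt rate.
 */
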
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx].next = q_vector->rx.ring;
	q_vector->rx.ring = &a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx].next = q_vector->tx.ring;
	q_vector->tx.ring = &a->tx_ring[t_idx];
	q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

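/*
 * Example of the constrained mapping above (queue counts assumed purely for
 * illustration): with 2 q_vectors and 3 Rx / 3 Tx queues, the first pass
 * computes rqpv = DIV_ROUND_UP(3, 2) = 2, so vector 0 takes Rx rings 0-1 and
 * vector 1 takes the remaining Rx ring 2; the Tx pass spreads the Tx rings
 * the same way.  When there are enough vectors for every ring, the 1-to-1
 * branch is taken instead and each ring gets its own vector.
 */
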
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Allocate buffer sizes that fit well into 32K and
	 * take into account max frame size of 9.5K
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
		rx_buf_len = IXGBEVF_RXBUFFER_2K;
	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
		rx_buf_len = IXGBEVF_RXBUFFER_4K;
	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
		rx_buf_len = IXGBEVF_RXBUFFER_8K;
	else
		rx_buf_len = IXGBEVF_RXBUFFER_10K;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
}

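/*
 * Example of the sizing logic above (MTU values assumed for illustration):
 * with the default MTU of 1500, max_frame = 1500 + ETH_HLEN + ETH_FCS_LEN +
 * VLAN_HLEN = 1522, so an X540 VF keeps MAXIMUM_ETHERNET_VLAN_SIZE buffers
 * while other MACs fall through to IXGBEVF_RXBUFFER_2K; with a jumbo MTU
 * around 9000 the frame no longer fits the 2K/4K/8K buckets and
 * IXGBEVF_RXBUFFER_10K is used for every Rx ring.
 */
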
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j;
	u32 rdlen;

	/* PSRTYPE must be initialized in 82599 */
	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);

		ixgbevf_configure_srrctl(adapter, j);
	}
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring,
					 IXGBE_DESC_UNUSED(ring));
	}
}

#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
				adapter->rx_ring[rxr].count - 1);
}

Greg Rose33bd9f62010-03-19 02:59:52 +00001312static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1313{
1314 /* Only save pre-reset stats if there are some */
1315 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1316 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1317 adapter->stats.base_vfgprc;
1318 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1319 adapter->stats.base_vfgptc;
1320 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1321 adapter->stats.base_vfgorc;
1322 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1323 adapter->stats.base_vfgotc;
1324 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1325 adapter->stats.base_vfmprc;
1326 }
1327}
1328
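/**
 * ixgbevf_init_last_counter_stats - snapshot the hardware counters
 * @adapter: board private structure
 *
 * Record the current register values as both the "last" and "base"
 * readings so that later updates measure deltas from this point.
 **/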
1329static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1330{
1331 struct ixgbe_hw *hw = &adapter->hw;
1332
1333 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1334 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1335 adapter->stats.last_vfgorc |=
1336 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1337 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1338 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1339 adapter->stats.last_vfgotc |=
1340 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1341 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1342
1343 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1344 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1345 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1346 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1347 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1348}
1349
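/**
 * ixgbevf_negotiate_api - negotiate the mailbox API version with the PF
 * @adapter: board private structure
 *
 * Offer the newest supported mailbox API first and fall back to older
 * versions until the PF accepts one.
 **/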
Alexander Duyck31186782012-07-20 08:09:58 +00001350static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1351{
1352 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck56e94092012-07-20 08:10:03 +00001353 int api[] = { ixgbe_mbox_api_11,
1354 ixgbe_mbox_api_10,
Alexander Duyck31186782012-07-20 08:09:58 +00001355 ixgbe_mbox_api_unknown };
1356 int err = 0, idx = 0;
1357
John Fastabend55fdd45b2012-10-01 14:52:20 +00001358 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck31186782012-07-20 08:09:58 +00001359
1360 while (api[idx] != ixgbe_mbox_api_unknown) {
1361 err = ixgbevf_negotiate_api_version(hw, api[idx]);
1362 if (!err)
1363 break;
1364 idx++;
1365 }
1366
John Fastabend55fdd45b2012-10-01 14:52:20 +00001367 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck31186782012-07-20 08:09:58 +00001368}
1369
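/**
 * ixgbevf_up_complete - finish bringing the interface up
 * @adapter: board private structure
 *
 * Enable the Tx and Rx queues in hardware, program the MAC filter,
 * enable NAPI and the transmit queues and start the watchdog timer.
 **/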
Greg Rose795180d2012-04-17 04:29:34 +00001370static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
Greg Rose92915f72010-01-09 02:24:10 +00001371{
1372 struct net_device *netdev = adapter->netdev;
1373 struct ixgbe_hw *hw = &adapter->hw;
1374 int i, j = 0;
1375 int num_rx_rings = adapter->num_rx_queues;
1376 u32 txdctl, rxdctl;
1377
1378 for (i = 0; i < adapter->num_tx_queues; i++) {
1379 j = adapter->tx_ring[i].reg_idx;
1380 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1381 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
1382 txdctl |= (8 << 16);
1383 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1384 }
1385
1386 for (i = 0; i < adapter->num_tx_queues; i++) {
1387 j = adapter->tx_ring[i].reg_idx;
1388 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1389 txdctl |= IXGBE_TXDCTL_ENABLE;
1390 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1391 }
1392
1393 for (i = 0; i < num_rx_rings; i++) {
1394 j = adapter->rx_ring[i].reg_idx;
1395 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
Jiri Pirkodadcd652011-07-21 03:25:09 +00001396 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
Greg Rose69bfbec2011-01-26 01:06:12 +00001397 if (hw->mac.type == ixgbe_mac_X540_vf) {
1398 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
1399 rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
1400 IXGBE_RXDCTL_RLPML_EN);
1401 }
Greg Rose92915f72010-01-09 02:24:10 +00001402 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1403 ixgbevf_rx_desc_queue_enable(adapter, i);
1404 }
1405
1406 ixgbevf_configure_msix(adapter);
1407
John Fastabend55fdd45b2012-10-01 14:52:20 +00001408 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001409
Greg Rose92fe0bf2012-11-02 05:50:47 +00001410 if (is_valid_ether_addr(hw->mac.addr))
1411 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1412 else
1413 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
Greg Rose92915f72010-01-09 02:24:10 +00001414
John Fastabend55fdd45b2012-10-01 14:52:20 +00001415 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001416
Greg Rose92915f72010-01-09 02:24:10 +00001417 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1418 ixgbevf_napi_enable_all(adapter);
1419
1420 /* enable transmits */
1421 netif_tx_start_all_queues(netdev);
1422
Greg Rose33bd9f62010-03-19 02:59:52 +00001423 ixgbevf_save_reset_stats(adapter);
1424 ixgbevf_init_last_counter_stats(adapter);
1425
Alexander Duyck4b2cd272012-08-02 01:16:59 +00001426 hw->mac.get_link_status = 1;
Greg Rose92915f72010-01-09 02:24:10 +00001427 mod_timer(&adapter->watchdog_timer, jiffies);
Greg Rose92915f72010-01-09 02:24:10 +00001428}
1429
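/**
 * ixgbevf_reset_queues - re-query the PF queue layout and rebuild Rx rings
 * @adapter: board private structure
 *
 * Fetch the queue configuration from the PF over the mailbox.  When the
 * PF runs with multiple traffic classes the VF needs one Rx ring per
 * class, so allocate a new ring array, move it into place and redo the
 * ring to vector mapping.
 **/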
Alexander Duyck56e94092012-07-20 08:10:03 +00001430static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
1431{
1432 struct ixgbe_hw *hw = &adapter->hw;
1433 struct ixgbevf_ring *rx_ring;
1434 unsigned int def_q = 0;
1435 unsigned int num_tcs = 0;
1436 unsigned int num_rx_queues = 1;
1437 int err, i;
1438
John Fastabend55fdd45b2012-10-01 14:52:20 +00001439 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck56e94092012-07-20 08:10:03 +00001440
1441 /* fetch queue configuration from the PF */
1442 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1443
John Fastabend55fdd45b2012-10-01 14:52:20 +00001444 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck56e94092012-07-20 08:10:03 +00001445
1446 if (err)
1447 return err;
1448
1449 if (num_tcs > 1) {
1450 /* update default Tx ring register index */
1451 adapter->tx_ring[0].reg_idx = def_q;
1452
1453 /* we need as many queues as traffic classes */
1454 num_rx_queues = num_tcs;
1455 }
1456
1457 /* nothing to do if we have the correct number of queues */
1458 if (adapter->num_rx_queues == num_rx_queues)
1459 return 0;
1460
1461 /* allocate new rings */
1462 rx_ring = kcalloc(num_rx_queues,
1463 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1464 if (!rx_ring)
1465 return -ENOMEM;
1466
1467 /* setup ring fields */
1468 for (i = 0; i < num_rx_queues; i++) {
1469 rx_ring[i].count = adapter->rx_ring_count;
1470 rx_ring[i].queue_index = i;
1471 rx_ring[i].reg_idx = i;
1472 rx_ring[i].dev = &adapter->pdev->dev;
1473 rx_ring[i].netdev = adapter->netdev;
1474
1475 /* allocate resources on the ring */
1476 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
1477 if (err) {
1478 while (i) {
1479 i--;
1480 ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
1481 }
1482 kfree(rx_ring);
1483 return err;
1484 }
1485 }
1486
1487 /* free the existing rings and queues */
1488 ixgbevf_free_all_rx_resources(adapter);
1489 adapter->num_rx_queues = 0;
1490 kfree(adapter->rx_ring);
1491
1492 /* move new rings into position on the adapter struct */
1493 adapter->rx_ring = rx_ring;
1494 adapter->num_rx_queues = num_rx_queues;
1495
1496 /* reset ring to vector mapping */
1497 ixgbevf_reset_q_vectors(adapter);
1498 ixgbevf_map_rings_to_vectors(adapter);
1499
1500 return 0;
1501}
1502
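/**
 * ixgbevf_up - bring the interface up after a down or reset
 * @adapter: board private structure
 *
 * Renegotiate the mailbox API, re-query the queue layout, reprogram the
 * hardware and re-enable interrupts.
 **/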
Greg Rose795180d2012-04-17 04:29:34 +00001503void ixgbevf_up(struct ixgbevf_adapter *adapter)
Greg Rose92915f72010-01-09 02:24:10 +00001504{
Greg Rose92915f72010-01-09 02:24:10 +00001505 struct ixgbe_hw *hw = &adapter->hw;
1506
Alexander Duyck31186782012-07-20 08:09:58 +00001507 ixgbevf_negotiate_api(adapter);
1508
Alexander Duyck56e94092012-07-20 08:10:03 +00001509 ixgbevf_reset_queues(adapter);
1510
Greg Rose92915f72010-01-09 02:24:10 +00001511 ixgbevf_configure(adapter);
1512
Greg Rose795180d2012-04-17 04:29:34 +00001513 ixgbevf_up_complete(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00001514
1515 /* clear any pending interrupts, may auto mask */
1516 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1517
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001518 ixgbevf_irq_enable(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00001519}
1520
1521/**
1522 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1523 * @adapter: board private structure
1524 * @rx_ring: ring to free buffers from
1525 **/
1526static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1527 struct ixgbevf_ring *rx_ring)
1528{
1529 struct pci_dev *pdev = adapter->pdev;
1530 unsigned long size;
1531 unsigned int i;
1532
Greg Rosec0456c22010-01-22 22:47:18 +00001533 if (!rx_ring->rx_buffer_info)
1534 return;
Greg Rose92915f72010-01-09 02:24:10 +00001535
Greg Rosec0456c22010-01-22 22:47:18 +00001536 /* Free all the Rx ring sk_buffs */
Greg Rose92915f72010-01-09 02:24:10 +00001537 for (i = 0; i < rx_ring->count; i++) {
1538 struct ixgbevf_rx_buffer *rx_buffer_info;
1539
1540 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1541 if (rx_buffer_info->dma) {
Nick Nunley2a1f8792010-04-27 13:10:50 +00001542 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
Greg Rose92915f72010-01-09 02:24:10 +00001543 rx_ring->rx_buf_len,
Nick Nunley2a1f8792010-04-27 13:10:50 +00001544 DMA_FROM_DEVICE);
Greg Rose92915f72010-01-09 02:24:10 +00001545 rx_buffer_info->dma = 0;
1546 }
1547 if (rx_buffer_info->skb) {
1548 struct sk_buff *skb = rx_buffer_info->skb;
1549 rx_buffer_info->skb = NULL;
1550 do {
1551 struct sk_buff *this = skb;
Alexander Duyck5c60f812012-09-01 05:12:38 +00001552 skb = IXGBE_CB(skb)->prev;
Greg Rose92915f72010-01-09 02:24:10 +00001553 dev_kfree_skb(this);
1554 } while (skb);
1555 }
Greg Rose92915f72010-01-09 02:24:10 +00001556 }
1557
1558 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1559 memset(rx_ring->rx_buffer_info, 0, size);
1560
1561 /* Zero out the descriptor ring */
1562 memset(rx_ring->desc, 0, rx_ring->size);
1563
1564 rx_ring->next_to_clean = 0;
1565 rx_ring->next_to_use = 0;
1566
1567 if (rx_ring->head)
1568 writel(0, adapter->hw.hw_addr + rx_ring->head);
1569 if (rx_ring->tail)
1570 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1571}
1572
1573/**
1574 * ixgbevf_clean_tx_ring - Free Tx Buffers
1575 * @adapter: board private structure
1576 * @tx_ring: ring to be cleaned
1577 **/
1578static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1579 struct ixgbevf_ring *tx_ring)
1580{
1581 struct ixgbevf_tx_buffer *tx_buffer_info;
1582 unsigned long size;
1583 unsigned int i;
1584
Greg Rosec0456c22010-01-22 22:47:18 +00001585 if (!tx_ring->tx_buffer_info)
1586 return;
1587
Greg Rose92915f72010-01-09 02:24:10 +00001588 /* Free all the Tx ring sk_buffs */
Greg Rose92915f72010-01-09 02:24:10 +00001589 for (i = 0; i < tx_ring->count; i++) {
1590 tx_buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck70a10e22012-05-11 08:33:21 +00001591 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
Greg Rose92915f72010-01-09 02:24:10 +00001592 }
1593
1594 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1595 memset(tx_ring->tx_buffer_info, 0, size);
1596
1597 memset(tx_ring->desc, 0, tx_ring->size);
1598
1599 tx_ring->next_to_use = 0;
1600 tx_ring->next_to_clean = 0;
1601
1602 if (tx_ring->head)
1603 writel(0, adapter->hw.hw_addr + tx_ring->head);
1604 if (tx_ring->tail)
1605 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1606}
1607
1608/**
1609 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1610 * @adapter: board private structure
1611 **/
1612static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1613{
1614 int i;
1615
1616 for (i = 0; i < adapter->num_rx_queues; i++)
1617 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1618}
1619
1620/**
1621 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1622 * @adapter: board private structure
1623 **/
1624static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1625{
1626 int i;
1627
1628 for (i = 0; i < adapter->num_tx_queues; i++)
1629 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1630}
1631
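/**
 * ixgbevf_down - quiesce the device and free ring buffers
 * @adapter: board private structure
 *
 * Stop the stack and NAPI, disable interrupts and the hardware Tx
 * queues, then reset the part and drop any buffers still on the rings.
 **/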
1632void ixgbevf_down(struct ixgbevf_adapter *adapter)
1633{
1634 struct net_device *netdev = adapter->netdev;
1635 struct ixgbe_hw *hw = &adapter->hw;
1636 u32 txdctl;
1637 int i, j;
1638
1639 /* signal that we are down to the interrupt handler */
1640 set_bit(__IXGBEVF_DOWN, &adapter->state);

	/* stop the stack from queuing new transmits */
	netif_tx_disable(netdev);
1644
1645 msleep(10);
1646
1647 netif_tx_stop_all_queues(netdev);
1648
1649 ixgbevf_irq_disable(adapter);
1650
1651 ixgbevf_napi_disable_all(adapter);
1652
1653 del_timer_sync(&adapter->watchdog_timer);
1654 /* can't call flush scheduled work here because it can deadlock
1655 * if linkwatch_event tries to acquire the rtnl_lock which we are
1656 * holding */
1657 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1658 msleep(1);
1659
1660 /* disable transmits in the hardware now that interrupts are off */
1661 for (i = 0; i < adapter->num_tx_queues; i++) {
1662 j = adapter->tx_ring[i].reg_idx;
1663 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1664 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
1665 (txdctl & ~IXGBE_TXDCTL_ENABLE));
1666 }
1667
1668 netif_carrier_off(netdev);
1669
1670 if (!pci_channel_offline(adapter->pdev))
1671 ixgbevf_reset(adapter);
1672
1673 ixgbevf_clean_all_tx_rings(adapter);
1674 ixgbevf_clean_all_rx_rings(adapter);
1675}
1676
1677void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1678{
1679 WARN_ON(in_interrupt());
Greg Rosec0456c22010-01-22 22:47:18 +00001680
Greg Rose92915f72010-01-09 02:24:10 +00001681 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1682 msleep(1);
1683
	/*
	 * The PF may still be in reset at this point.  If so, the down/up
	 * sequence below will fail to reach it over the mailbox, and the
	 * watchdog task will continue to schedule reset tasks until the
	 * PF is up and ready to service requests from the VF again.
	 */
Alexander Duyck4b2cd272012-08-02 01:16:59 +00001691 ixgbevf_down(adapter);
1692 ixgbevf_up(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00001693
1694 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1695}
1696
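/**
 * ixgbevf_reset - reset the VF via the mailbox and refresh the MAC address
 * @adapter: board private structure
 **/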
1697void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1698{
1699 struct ixgbe_hw *hw = &adapter->hw;
1700 struct net_device *netdev = adapter->netdev;
1701
1702 if (hw->mac.ops.reset_hw(hw))
1703 hw_dbg(hw, "PF still resetting\n");
1704 else
1705 hw->mac.ops.init_hw(hw);
1706
1707 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1708 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1709 netdev->addr_len);
1710 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1711 netdev->addr_len);
1712 }
1713}
1714
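/**
 * ixgbevf_acquire_msix_vectors - acquire MSI-X vectors from the kernel
 * @adapter: board private structure
 * @vectors: number of vectors requested
 *
 * pci_enable_msix() either succeeds, fails outright, or returns the
 * number of vectors it could have provided, so retry with that smaller
 * count until we succeed or fall below the minimum of two vectors
 * (one Tx/Rx pair plus one for mailbox/link events).
 **/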
Jakub Kicinskie45dd5f2012-11-13 04:03:16 +00001715static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1716 int vectors)
Greg Rose92915f72010-01-09 02:24:10 +00001717{
Emil Tantilova5f93372012-11-13 04:03:17 +00001718 int err = 0;
1719 int vector_threshold;
Greg Rose92915f72010-01-09 02:24:10 +00001720
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001721 /* We'll want at least 2 (vector_threshold):
1722 * 1) TxQ[0] + RxQ[0] handler
1723 * 2) Other (Link Status Change, etc.)
Greg Rose92915f72010-01-09 02:24:10 +00001724 */
1725 vector_threshold = MIN_MSIX_COUNT;
1726
1727 /* The more we get, the more we will assign to Tx/Rx Cleanup
1728 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1729 * Right now, we simply care about how many we'll get; we'll
1730 * set them up later while requesting irq's.
1731 */
1732 while (vectors >= vector_threshold) {
1733 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1734 vectors);
Jakub Kicinskie45dd5f2012-11-13 04:03:16 +00001735 if (!err || err < 0) /* Success or a nasty failure. */
Greg Rose92915f72010-01-09 02:24:10 +00001736 break;
Greg Rose92915f72010-01-09 02:24:10 +00001737 else /* err == number of vectors we should try again with */
1738 vectors = err;
1739 }
1740
Jakub Kicinskie45dd5f2012-11-13 04:03:16 +00001741 if (vectors < vector_threshold)
1742 err = -ENOMEM;
1743
1744 if (err) {
1745 dev_err(&adapter->pdev->dev,
1746 "Unable to allocate MSI-X interrupts\n");
Greg Rose92915f72010-01-09 02:24:10 +00001747 kfree(adapter->msix_entries);
1748 adapter->msix_entries = NULL;
1749 } else {
1750 /*
1751 * Adjust for only the vectors we'll use, which is minimum
1752 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1753 * vectors we were allocated.
1754 */
1755 adapter->num_msix_vectors = vectors;
1756 }
Greg Rosedee847f2012-11-02 05:50:57 +00001757
Jakub Kicinskie45dd5f2012-11-13 04:03:16 +00001758 return err;
Greg Rose92915f72010-01-09 02:24:10 +00001759}
1760
Ben Hutchings49ce9c22012-07-10 10:56:00 +00001761/**
1762 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
Greg Rose92915f72010-01-09 02:24:10 +00001763 * @adapter: board private structure to initialize
1764 *
1765 * This is the top level queue allocation routine. The order here is very
1766 * important, starting with the "most" number of features turned on at once,
1767 * and ending with the smallest set of features. This way large combinations
1768 * can be allocated if they're turned on, and smaller combinations are the
1769 * fallthrough conditions.
1770 *
1771 **/
1772static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1773{
1774 /* Start with base case */
1775 adapter->num_rx_queues = 1;
1776 adapter->num_tx_queues = 1;
Greg Rose92915f72010-01-09 02:24:10 +00001777}
1778
1779/**
1780 * ixgbevf_alloc_queues - Allocate memory for all rings
1781 * @adapter: board private structure to initialize
1782 *
1783 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  Only the ring metadata is set up
 * here; descriptor memory is allocated later when the interface is
 * opened.
1786 **/
1787static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1788{
1789 int i;
1790
1791 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1792 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1793 if (!adapter->tx_ring)
1794 goto err_tx_ring_allocation;
1795
1796 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1797 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1798 if (!adapter->rx_ring)
1799 goto err_rx_ring_allocation;
1800
1801 for (i = 0; i < adapter->num_tx_queues; i++) {
1802 adapter->tx_ring[i].count = adapter->tx_ring_count;
1803 adapter->tx_ring[i].queue_index = i;
Alexander Duyck56e94092012-07-20 08:10:03 +00001804 /* reg_idx may be remapped later by DCB config */
Greg Rose92915f72010-01-09 02:24:10 +00001805 adapter->tx_ring[i].reg_idx = i;
Alexander Duyckfb401952012-05-11 08:33:16 +00001806 adapter->tx_ring[i].dev = &adapter->pdev->dev;
1807 adapter->tx_ring[i].netdev = adapter->netdev;
Greg Rose92915f72010-01-09 02:24:10 +00001808 }
1809
1810 for (i = 0; i < adapter->num_rx_queues; i++) {
1811 adapter->rx_ring[i].count = adapter->rx_ring_count;
1812 adapter->rx_ring[i].queue_index = i;
1813 adapter->rx_ring[i].reg_idx = i;
Alexander Duyckfb401952012-05-11 08:33:16 +00001814 adapter->rx_ring[i].dev = &adapter->pdev->dev;
1815 adapter->rx_ring[i].netdev = adapter->netdev;
Greg Rose92915f72010-01-09 02:24:10 +00001816 }
1817
1818 return 0;
1819
1820err_rx_ring_allocation:
1821 kfree(adapter->tx_ring);
1822err_tx_ring_allocation:
1823 return -ENOMEM;
1824}
1825
1826/**
1827 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
1828 * @adapter: board private structure to initialize
1829 *
1830 * Attempt to configure the interrupts using the best available
1831 * capabilities of the hardware and the kernel.
1832 **/
1833static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1834{
Greg Rose91e2b892012-10-03 00:57:23 +00001835 struct net_device *netdev = adapter->netdev;
Greg Rose92915f72010-01-09 02:24:10 +00001836 int err = 0;
1837 int vector, v_budget;
1838
1839 /*
1840 * It's easy to be greedy for MSI-X vectors, but it really
1841 * doesn't do us much good if we have a lot more vectors
	 * than CPUs. So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPUs.
1844 * The default is to use pairs of vectors.
Greg Rose92915f72010-01-09 02:24:10 +00001845 */
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001846 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
1847 v_budget = min_t(int, v_budget, num_online_cpus());
1848 v_budget += NON_Q_VECTORS;
Greg Rose92915f72010-01-09 02:24:10 +00001849
	/* A failure in MSI-X entry allocation is fatal for the VF driver:
	 * unlike the PF, there is no MSI or legacy interrupt fallback, so
	 * without MSI-X the device cannot be brought up.
	 */
1852 adapter->msix_entries = kcalloc(v_budget,
1853 sizeof(struct msix_entry), GFP_KERNEL);
1854 if (!adapter->msix_entries) {
1855 err = -ENOMEM;
1856 goto out;
1857 }
1858
1859 for (vector = 0; vector < v_budget; vector++)
1860 adapter->msix_entries[vector].entry = vector;
1861
Jakub Kicinskie45dd5f2012-11-13 04:03:16 +00001862 err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
1863 if (err)
1864 goto out;
Greg Rose92915f72010-01-09 02:24:10 +00001865
Greg Rose91e2b892012-10-03 00:57:23 +00001866 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
1867 if (err)
1868 goto out;
1869
1870 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
1871
Greg Rose92915f72010-01-09 02:24:10 +00001872out:
1873 return err;
1874}
1875
1876/**
1877 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
1878 * @adapter: board private structure to initialize
1879 *
1880 * We allocate one q_vector per queue interrupt. If allocation fails we
1881 * return -ENOMEM.
1882 **/
1883static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
1884{
1885 int q_idx, num_q_vectors;
1886 struct ixgbevf_q_vector *q_vector;
Greg Rose92915f72010-01-09 02:24:10 +00001887
1888 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
Greg Rose92915f72010-01-09 02:24:10 +00001889
1890 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1891 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
1892 if (!q_vector)
1893 goto err_out;
1894 q_vector->adapter = adapter;
1895 q_vector->v_idx = q_idx;
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001896 netif_napi_add(adapter->netdev, &q_vector->napi,
1897 ixgbevf_poll, 64);
Greg Rose92915f72010-01-09 02:24:10 +00001898 adapter->q_vector[q_idx] = q_vector;
1899 }
1900
1901 return 0;
1902
1903err_out:
1904 while (q_idx) {
1905 q_idx--;
1906 q_vector = adapter->q_vector[q_idx];
1907 netif_napi_del(&q_vector->napi);
1908 kfree(q_vector);
1909 adapter->q_vector[q_idx] = NULL;
1910 }
1911 return -ENOMEM;
1912}
1913
1914/**
1915 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
1916 * @adapter: board private structure to initialize
1917 *
1918 * This function frees the memory allocated to the q_vectors. In addition if
1919 * NAPI is enabled it will delete any references to the NAPI struct prior
1920 * to freeing the q_vector.
1921 **/
1922static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
1923{
John Fastabendf4477702012-09-16 08:19:46 +00001924 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
Greg Rose92915f72010-01-09 02:24:10 +00001925
1926 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1927 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
1928
1929 adapter->q_vector[q_idx] = NULL;
John Fastabendf4477702012-09-16 08:19:46 +00001930 netif_napi_del(&q_vector->napi);
Greg Rose92915f72010-01-09 02:24:10 +00001931 kfree(q_vector);
1932 }
1933}
1934
1935/**
1936 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
1937 * @adapter: board private structure
1938 *
1939 **/
1940static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
1941{
1942 pci_disable_msix(adapter->pdev);
1943 kfree(adapter->msix_entries);
1944 adapter->msix_entries = NULL;
Greg Rose92915f72010-01-09 02:24:10 +00001945}
1946
1947/**
1948 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
1949 * @adapter: board private structure to initialize
1950 *
1951 **/
1952static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
1953{
1954 int err;
1955
1956 /* Number of supported queues */
1957 ixgbevf_set_num_queues(adapter);
1958
1959 err = ixgbevf_set_interrupt_capability(adapter);
1960 if (err) {
1961 hw_dbg(&adapter->hw,
1962 "Unable to setup interrupt capabilities\n");
1963 goto err_set_interrupt;
1964 }
1965
1966 err = ixgbevf_alloc_q_vectors(adapter);
1967 if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to allocate memory for queue vectors\n");
1970 goto err_alloc_q_vectors;
1971 }
1972
1973 err = ixgbevf_alloc_queues(adapter);
1974 if (err) {
Jeff Kirsherdbd96362011-10-21 19:38:18 +00001975 pr_err("Unable to allocate memory for queues\n");
Greg Rose92915f72010-01-09 02:24:10 +00001976 goto err_alloc_queues;
1977 }
1978
	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
	       adapter->num_rx_queues, adapter->num_tx_queues);
1983
1984 set_bit(__IXGBEVF_DOWN, &adapter->state);
1985
1986 return 0;
1987err_alloc_queues:
1988 ixgbevf_free_q_vectors(adapter);
1989err_alloc_q_vectors:
1990 ixgbevf_reset_interrupt_capability(adapter);
1991err_set_interrupt:
1992 return err;
1993}
1994
1995/**
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00001996 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
1997 * @adapter: board private structure to clear interrupt scheme on
1998 *
1999 * We go through and clear interrupt specific resources and reset the structure
2000 * to pre-load conditions
2001 **/
2002static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2003{
2004 adapter->num_tx_queues = 0;
2005 adapter->num_rx_queues = 0;
2006
2007 ixgbevf_free_q_vectors(adapter);
2008 ixgbevf_reset_interrupt_capability(adapter);
2009}
2010
2011/**
Greg Rose92915f72010-01-09 02:24:10 +00002012 * ixgbevf_sw_init - Initialize general software structures
2013 * (struct ixgbevf_adapter)
2014 * @adapter: board private structure to initialize
2015 *
2016 * ixgbevf_sw_init initializes the Adapter private data structure.
2017 * Fields are initialized based on PCI device information and
2018 * OS network device settings (MTU size).
2019 **/
2020static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2021{
2022 struct ixgbe_hw *hw = &adapter->hw;
2023 struct pci_dev *pdev = adapter->pdev;
2024 int err;
2025
2026 /* PCI config space info */
2027
2028 hw->vendor_id = pdev->vendor;
2029 hw->device_id = pdev->device;
Sergei Shtylyovff938e42011-02-28 11:57:33 -08002030 hw->revision_id = pdev->revision;
Greg Rose92915f72010-01-09 02:24:10 +00002031 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2032 hw->subsystem_device_id = pdev->subsystem_device;
2033
2034 hw->mbx.ops.init_params(hw);
Alexander Duyck56e94092012-07-20 08:10:03 +00002035
2036 /* assume legacy case in which PF would only give VF 2 queues */
2037 hw->mac.max_tx_queues = 2;
2038 hw->mac.max_rx_queues = 2;
2039
Greg Rose92915f72010-01-09 02:24:10 +00002040 err = hw->mac.ops.reset_hw(hw);
2041 if (err) {
2042 dev_info(&pdev->dev,
2043 "PF still in reset state, assigning new address\n");
Danny Kukawka1a0d6ae2012-02-09 09:48:54 +00002044 eth_hw_addr_random(adapter->netdev);
2045 memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
2046 adapter->netdev->addr_len);
Greg Rose92915f72010-01-09 02:24:10 +00002047 } else {
2048 err = hw->mac.ops.init_hw(hw);
2049 if (err) {
			pr_err("init_hw failed: %d\n", err);
Greg Rose92915f72010-01-09 02:24:10 +00002051 goto out;
2052 }
Danny Kukawka1a0d6ae2012-02-09 09:48:54 +00002053 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
Greg Rosedee847f2012-11-02 05:50:57 +00002054 adapter->netdev->addr_len);
Greg Rose92915f72010-01-09 02:24:10 +00002055 }
2056
Alexander Duyck1c55ed72012-05-11 08:33:06 +00002057 /* lock to protect mailbox accesses */
2058 spin_lock_init(&adapter->mbx_lock);
2059
Greg Rose92915f72010-01-09 02:24:10 +00002060 /* Enable dynamic interrupt throttling rates */
Alexander Duyck5f3600e2012-05-11 08:32:55 +00002061 adapter->rx_itr_setting = 1;
2062 adapter->tx_itr_setting = 1;
Greg Rose92915f72010-01-09 02:24:10 +00002063
Greg Rose92915f72010-01-09 02:24:10 +00002064 /* set default ring sizes */
2065 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2066 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2067
Greg Rose92915f72010-01-09 02:24:10 +00002068 set_bit(__IXGBEVF_DOWN, &adapter->state);
Danny Kukawka1a0d6ae2012-02-09 09:48:54 +00002069 return 0;
Greg Rose92915f72010-01-09 02:24:10 +00002070
2071out:
2072 return err;
2073}
2074
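/*
 * The VF statistics registers are only 32 bits wide (36 bits for the
 * octet counters) and wrap silently.  These helpers detect a wrap by
 * comparing against the last value read and carry the overflow into
 * the 64-bit software counters kept in adapter->stats.
 */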
Greg Rose92915f72010-01-09 02:24:10 +00002075#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2076 { \
2077 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2078 if (current_counter < last_counter) \
2079 counter += 0x100000000LL; \
2080 last_counter = current_counter; \
2081 counter &= 0xFFFFFFFF00000000LL; \
2082 counter |= current_counter; \
2083 }
2084
2085#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2086 { \
2087 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2088 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2089 u64 current_counter = (current_counter_msb << 32) | \
2090 current_counter_lsb; \
2091 if (current_counter < last_counter) \
2092 counter += 0x1000000000LL; \
2093 last_counter = current_counter; \
2094 counter &= 0xFFFFFFF000000000LL; \
2095 counter |= current_counter; \
2096 }
2097/**
2098 * ixgbevf_update_stats - Update the board statistics counters.
2099 * @adapter: board private structure
2100 **/
2101void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2102{
2103 struct ixgbe_hw *hw = &adapter->hw;
2104
2105 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2106 adapter->stats.vfgprc);
2107 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2108 adapter->stats.vfgptc);
2109 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2110 adapter->stats.last_vfgorc,
2111 adapter->stats.vfgorc);
2112 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2113 adapter->stats.last_vfgotc,
2114 adapter->stats.vfgotc);
2115 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2116 adapter->stats.vfmprc);
Greg Rose92915f72010-01-09 02:24:10 +00002117}
2118
2119/**
2120 * ixgbevf_watchdog - Timer Call-back
2121 * @data: pointer to adapter cast into an unsigned long
2122 **/
2123static void ixgbevf_watchdog(unsigned long data)
2124{
2125 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2126 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck5f3600e2012-05-11 08:32:55 +00002127 u32 eics = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002128 int i;
2129
2130 /*
2131 * Do the watchdog outside of interrupt context due to the lovely
2132 * delays that some of the newer hardware requires
2133 */
2134
2135 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2136 goto watchdog_short_circuit;
2137
2138 /* get one bit for every active tx/rx interrupt vector */
2139 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2140 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
Alexander Duyck6b43c442012-05-11 08:32:45 +00002141 if (qv->rx.ring || qv->tx.ring)
Alexander Duyck5f3600e2012-05-11 08:32:55 +00002142 eics |= 1 << i;
Greg Rose92915f72010-01-09 02:24:10 +00002143 }
2144
Alexander Duyck5f3600e2012-05-11 08:32:55 +00002145 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
Greg Rose92915f72010-01-09 02:24:10 +00002146
2147watchdog_short_circuit:
2148 schedule_work(&adapter->watchdog_task);
2149}
2150
2151/**
2152 * ixgbevf_tx_timeout - Respond to a Tx Hang
2153 * @netdev: network interface device structure
2154 **/
2155static void ixgbevf_tx_timeout(struct net_device *netdev)
2156{
2157 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2158
2159 /* Do the reset outside of interrupt context */
2160 schedule_work(&adapter->reset_task);
2161}
2162
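/**
 * ixgbevf_reset_task - work item scheduled to reset the device
 * @work: pointer to the work_struct embedded in the adapter
 **/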
2163static void ixgbevf_reset_task(struct work_struct *work)
2164{
2165 struct ixgbevf_adapter *adapter;
2166 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2167
2168 /* If we're already down or resetting, just bail */
2169 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2170 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2171 return;
2172
2173 adapter->tx_timeout_count++;
2174
2175 ixgbevf_reinit_locked(adapter);
2176}
2177
2178/**
2179 * ixgbevf_watchdog_task - worker thread to bring link up
2180 * @work: pointer to work_struct containing our data
2181 **/
2182static void ixgbevf_watchdog_task(struct work_struct *work)
2183{
2184 struct ixgbevf_adapter *adapter = container_of(work,
2185 struct ixgbevf_adapter,
2186 watchdog_task);
2187 struct net_device *netdev = adapter->netdev;
2188 struct ixgbe_hw *hw = &adapter->hw;
2189 u32 link_speed = adapter->link_speed;
2190 bool link_up = adapter->link_up;
Greg Rose92fe0bf2012-11-02 05:50:47 +00002191 s32 need_reset;
Greg Rose92915f72010-01-09 02:24:10 +00002192
2193 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2194
2195 /*
2196 * Always check the link on the watchdog because we have
2197 * no LSC interrupt
2198 */
Greg Rose92fe0bf2012-11-02 05:50:47 +00002199 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00002200
Greg Rose92fe0bf2012-11-02 05:50:47 +00002201 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00002202
Greg Rose92fe0bf2012-11-02 05:50:47 +00002203 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00002204
Greg Rose92fe0bf2012-11-02 05:50:47 +00002205 if (need_reset) {
2206 adapter->link_up = link_up;
2207 adapter->link_speed = link_speed;
2208 netif_carrier_off(netdev);
2209 netif_tx_stop_all_queues(netdev);
2210 schedule_work(&adapter->reset_task);
2211 goto pf_has_reset;
Greg Rose92915f72010-01-09 02:24:10 +00002212 }
2213 adapter->link_up = link_up;
2214 adapter->link_speed = link_speed;
2215
2216 if (link_up) {
2217 if (!netif_carrier_ok(netdev)) {
Joe Perches300bc062010-03-22 20:08:04 -07002218 hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
2219 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2220 10 : 1);
Greg Rose92915f72010-01-09 02:24:10 +00002221 netif_carrier_on(netdev);
2222 netif_tx_wake_all_queues(netdev);
Greg Rose92915f72010-01-09 02:24:10 +00002223 }
2224 } else {
2225 adapter->link_up = false;
2226 adapter->link_speed = 0;
2227 if (netif_carrier_ok(netdev)) {
2228 hw_dbg(&adapter->hw, "NIC Link is Down\n");
2229 netif_carrier_off(netdev);
2230 netif_tx_stop_all_queues(netdev);
2231 }
2232 }
2233
Greg Rose92915f72010-01-09 02:24:10 +00002234 ixgbevf_update_stats(adapter);
2235
Greg Rose33bd9f62010-03-19 02:59:52 +00002236pf_has_reset:
Greg Rose92915f72010-01-09 02:24:10 +00002237 /* Reset the timer */
2238 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2239 mod_timer(&adapter->watchdog_timer,
2240 round_jiffies(jiffies + (2 * HZ)));
2241
2242 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2243}
2244
2245/**
2246 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2247 * @adapter: board private structure
2248 * @tx_ring: Tx descriptor ring for a specific queue
2249 *
2250 * Free all transmit software resources
2251 **/
2252void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2253 struct ixgbevf_ring *tx_ring)
2254{
2255 struct pci_dev *pdev = adapter->pdev;
2256
Greg Rose92915f72010-01-09 02:24:10 +00002257 ixgbevf_clean_tx_ring(adapter, tx_ring);
2258
2259 vfree(tx_ring->tx_buffer_info);
2260 tx_ring->tx_buffer_info = NULL;
2261
Nick Nunley2a1f8792010-04-27 13:10:50 +00002262 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2263 tx_ring->dma);
Greg Rose92915f72010-01-09 02:24:10 +00002264
2265 tx_ring->desc = NULL;
2266}
2267
2268/**
2269 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2270 * @adapter: board private structure
2271 *
2272 * Free all transmit software resources
2273 **/
2274static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2275{
2276 int i;
2277
2278 for (i = 0; i < adapter->num_tx_queues; i++)
2279 if (adapter->tx_ring[i].desc)
2280 ixgbevf_free_tx_resources(adapter,
2281 &adapter->tx_ring[i]);
2282
2283}
2284
2285/**
2286 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2287 * @adapter: board private structure
2288 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2289 *
2290 * Return 0 on success, negative on failure
2291 **/
2292int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2293 struct ixgbevf_ring *tx_ring)
2294{
2295 struct pci_dev *pdev = adapter->pdev;
2296 int size;
2297
2298 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
Eric Dumazet89bf67f2010-11-22 00:15:06 +00002299 tx_ring->tx_buffer_info = vzalloc(size);
Greg Rose92915f72010-01-09 02:24:10 +00002300 if (!tx_ring->tx_buffer_info)
2301 goto err;
Greg Rose92915f72010-01-09 02:24:10 +00002302
2303 /* round up to nearest 4K */
2304 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2305 tx_ring->size = ALIGN(tx_ring->size, 4096);
2306
Nick Nunley2a1f8792010-04-27 13:10:50 +00002307 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2308 &tx_ring->dma, GFP_KERNEL);
Greg Rose92915f72010-01-09 02:24:10 +00002309 if (!tx_ring->desc)
2310 goto err;
2311
2312 tx_ring->next_to_use = 0;
2313 tx_ring->next_to_clean = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002314 return 0;
2315
2316err:
2317 vfree(tx_ring->tx_buffer_info);
2318 tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw,
	       "Unable to allocate memory for the transmit descriptor ring\n");
2321 return -ENOMEM;
2322}
2323
2324/**
2325 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2326 * @adapter: board private structure
2327 *
2328 * If this function returns with an error, then it's possible one or
2329 * more of the rings is populated (while the rest are not). It is the
2330 * callers duty to clean those orphaned rings.
2331 *
2332 * Return 0 on success, negative on failure
2333 **/
2334static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2335{
2336 int i, err = 0;
2337
2338 for (i = 0; i < adapter->num_tx_queues; i++) {
2339 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2340 if (!err)
2341 continue;
2342 hw_dbg(&adapter->hw,
2343 "Allocation for Tx Queue %u failed\n", i);
2344 break;
2345 }
2346
2347 return err;
2348}
2349
2350/**
2351 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2352 * @adapter: board private structure
2353 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2354 *
2355 * Returns 0 on success, negative on failure
2356 **/
2357int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2358 struct ixgbevf_ring *rx_ring)
2359{
2360 struct pci_dev *pdev = adapter->pdev;
2361 int size;
2362
2363 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
Eric Dumazet89bf67f2010-11-22 00:15:06 +00002364 rx_ring->rx_buffer_info = vzalloc(size);
Joe Perchese404dec2012-01-29 12:56:23 +00002365 if (!rx_ring->rx_buffer_info)
Greg Rose92915f72010-01-09 02:24:10 +00002366 goto alloc_failed;
Greg Rose92915f72010-01-09 02:24:10 +00002367
2368 /* Round up to nearest 4K */
2369 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2370 rx_ring->size = ALIGN(rx_ring->size, 4096);
2371
Nick Nunley2a1f8792010-04-27 13:10:50 +00002372 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2373 &rx_ring->dma, GFP_KERNEL);
Greg Rose92915f72010-01-09 02:24:10 +00002374
2375 if (!rx_ring->desc) {
		hw_dbg(&adapter->hw,
		       "Unable to allocate memory for the receive descriptor ring\n");
2379 vfree(rx_ring->rx_buffer_info);
2380 rx_ring->rx_buffer_info = NULL;
2381 goto alloc_failed;
2382 }
2383
2384 rx_ring->next_to_clean = 0;
2385 rx_ring->next_to_use = 0;
2386
2387 return 0;
2388alloc_failed:
2389 return -ENOMEM;
2390}
2391
2392/**
2393 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2394 * @adapter: board private structure
2395 *
2396 * If this function returns with an error, then it's possible one or
2397 * more of the rings is populated (while the rest are not). It is the
2398 * callers duty to clean those orphaned rings.
2399 *
2400 * Return 0 on success, negative on failure
2401 **/
2402static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2403{
2404 int i, err = 0;
2405
2406 for (i = 0; i < adapter->num_rx_queues; i++) {
2407 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2408 if (!err)
2409 continue;
2410 hw_dbg(&adapter->hw,
2411 "Allocation for Rx Queue %u failed\n", i);
2412 break;
2413 }
2414 return err;
2415}
2416
2417/**
2418 * ixgbevf_free_rx_resources - Free Rx Resources
2419 * @adapter: board private structure
2420 * @rx_ring: ring to clean the resources from
2421 *
2422 * Free all receive software resources
2423 **/
2424void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2425 struct ixgbevf_ring *rx_ring)
2426{
2427 struct pci_dev *pdev = adapter->pdev;
2428
2429 ixgbevf_clean_rx_ring(adapter, rx_ring);
2430
2431 vfree(rx_ring->rx_buffer_info);
2432 rx_ring->rx_buffer_info = NULL;
2433
Nick Nunley2a1f8792010-04-27 13:10:50 +00002434 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2435 rx_ring->dma);
Greg Rose92915f72010-01-09 02:24:10 +00002436
2437 rx_ring->desc = NULL;
2438}
2439
2440/**
2441 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2442 * @adapter: board private structure
2443 *
2444 * Free all receive software resources
2445 **/
2446static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2447{
2448 int i;
2449
2450 for (i = 0; i < adapter->num_rx_queues; i++)
2451 if (adapter->rx_ring[i].desc)
2452 ixgbevf_free_rx_resources(adapter,
2453 &adapter->rx_ring[i]);
2454}
2455
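/**
 * ixgbevf_setup_queues - size the Rx queues to match the PF configuration
 * @adapter: board private structure
 *
 * Fetch the queue layout from the PF over the mailbox; when the PF runs
 * with multiple traffic classes the VF needs one Rx ring per class, so
 * reallocate the Rx ring array if the required count has changed.
 **/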
Alexander Duyck56e94092012-07-20 08:10:03 +00002456static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
2457{
2458 struct ixgbe_hw *hw = &adapter->hw;
2459 struct ixgbevf_ring *rx_ring;
2460 unsigned int def_q = 0;
2461 unsigned int num_tcs = 0;
2462 unsigned int num_rx_queues = 1;
2463 int err, i;
2464
John Fastabend55fdd45b2012-10-01 14:52:20 +00002465 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck56e94092012-07-20 08:10:03 +00002466
2467 /* fetch queue configuration from the PF */
2468 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2469
John Fastabend55fdd45b2012-10-01 14:52:20 +00002470 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck56e94092012-07-20 08:10:03 +00002471
2472 if (err)
2473 return err;
2474
2475 if (num_tcs > 1) {
2476 /* update default Tx ring register index */
2477 adapter->tx_ring[0].reg_idx = def_q;
2478
2479 /* we need as many queues as traffic classes */
2480 num_rx_queues = num_tcs;
2481 }
2482
2483 /* nothing to do if we have the correct number of queues */
2484 if (adapter->num_rx_queues == num_rx_queues)
2485 return 0;
2486
2487 /* allocate new rings */
2488 rx_ring = kcalloc(num_rx_queues,
2489 sizeof(struct ixgbevf_ring), GFP_KERNEL);
2490 if (!rx_ring)
2491 return -ENOMEM;
2492
2493 /* setup ring fields */
2494 for (i = 0; i < num_rx_queues; i++) {
2495 rx_ring[i].count = adapter->rx_ring_count;
2496 rx_ring[i].queue_index = i;
2497 rx_ring[i].reg_idx = i;
2498 rx_ring[i].dev = &adapter->pdev->dev;
2499 rx_ring[i].netdev = adapter->netdev;
2500 }
2501
2502 /* free the existing ring and queues */
2503 adapter->num_rx_queues = 0;
2504 kfree(adapter->rx_ring);
2505
2506 /* move new rings into position on the adapter struct */
2507 adapter->rx_ring = rx_ring;
2508 adapter->num_rx_queues = num_rx_queues;
2509
2510 return 0;
2511}
2512
Greg Rose92915f72010-01-09 02:24:10 +00002513/**
2514 * ixgbevf_open - Called when a network interface is made active
2515 * @netdev: network interface device structure
2516 *
2517 * Returns 0 on success, negative value on failure
2518 *
2519 * The open entry point is called when a network interface is made
2520 * active by the system (IFF_UP). At this point all resources needed
2521 * for transmit and receive operations are allocated, the interrupt
2522 * handler is registered with the OS, the watchdog timer is started,
2523 * and the stack is notified that the interface is ready.
2524 **/
2525static int ixgbevf_open(struct net_device *netdev)
2526{
2527 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2528 struct ixgbe_hw *hw = &adapter->hw;
2529 int err;
2530
2531 /* disallow open during test */
2532 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2533 return -EBUSY;
2534
2535 if (hw->adapter_stopped) {
2536 ixgbevf_reset(adapter);
2537 /* if adapter is still stopped then PF isn't up and
2538 * the vf can't start. */
2539 if (hw->adapter_stopped) {
2540 err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
Greg Rose92915f72010-01-09 02:24:10 +00002543 goto err_setup_reset;
2544 }
2545 }
2546
Alexander Duyck31186782012-07-20 08:09:58 +00002547 ixgbevf_negotiate_api(adapter);
2548
Alexander Duyck56e94092012-07-20 08:10:03 +00002549 /* setup queue reg_idx and Rx queue count */
2550 err = ixgbevf_setup_queues(adapter);
2551 if (err)
2552 goto err_setup_queues;
2553
Greg Rose92915f72010-01-09 02:24:10 +00002554 /* allocate transmit descriptors */
2555 err = ixgbevf_setup_all_tx_resources(adapter);
2556 if (err)
2557 goto err_setup_tx;
2558
2559 /* allocate receive descriptors */
2560 err = ixgbevf_setup_all_rx_resources(adapter);
2561 if (err)
2562 goto err_setup_rx;
2563
2564 ixgbevf_configure(adapter);
2565
2566 /*
2567 * Map the Tx/Rx rings to the vectors we were allotted.
	 * Since request_irq() is called later in this function, the ring
	 * to vector mapping must be set up *before* up_complete.
2570 */
2571 ixgbevf_map_rings_to_vectors(adapter);
2572
Greg Rose795180d2012-04-17 04:29:34 +00002573 ixgbevf_up_complete(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00002574
2575 /* clear any pending interrupts, may auto mask */
2576 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2577 err = ixgbevf_request_irq(adapter);
2578 if (err)
2579 goto err_req_irq;
2580
Alexander Duyck5f3600e2012-05-11 08:32:55 +00002581 ixgbevf_irq_enable(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00002582
2583 return 0;
2584
2585err_req_irq:
2586 ixgbevf_down(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00002587 ixgbevf_free_irq(adapter);
2588err_setup_rx:
2589 ixgbevf_free_all_rx_resources(adapter);
2590err_setup_tx:
2591 ixgbevf_free_all_tx_resources(adapter);
Alexander Duyck56e94092012-07-20 08:10:03 +00002592err_setup_queues:
Greg Rose92915f72010-01-09 02:24:10 +00002593 ixgbevf_reset(adapter);
2594
2595err_setup_reset:
2596
2597 return err;
2598}
2599
2600/**
2601 * ixgbevf_close - Disables a network interface
2602 * @netdev: network interface device structure
2603 *
2604 * Returns 0, this is not allowed to fail
2605 *
2606 * The close entry point is called when an interface is de-activated
2607 * by the OS. The hardware is still under the drivers control, but
2608 * needs to be disabled. A global MAC reset is issued to stop the
2609 * hardware, and all transmit and receive resources are freed.
2610 **/
2611static int ixgbevf_close(struct net_device *netdev)
2612{
2613 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2614
2615 ixgbevf_down(adapter);
2616 ixgbevf_free_irq(adapter);
2617
2618 ixgbevf_free_all_tx_resources(adapter);
2619 ixgbevf_free_all_rx_resources(adapter);
2620
2621 return 0;
2622}
2623
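/**
 * ixgbevf_tx_ctxtdesc - write an advanced context descriptor
 * @tx_ring: Tx ring to place the descriptor on
 * @vlan_macip_lens: VLAN tag and MAC/IP header length fields
 * @type_tucmd: descriptor type and offload command bits
 * @mss_l4len_idx: MSS, L4 header length and context index
 *
 * Context descriptors carry the offload parameters (checksum, TSO,
 * VLAN) that the following data descriptors refer to.
 **/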
Alexander Duyck70a10e22012-05-11 08:33:21 +00002624static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2625 u32 vlan_macip_lens, u32 type_tucmd,
2626 u32 mss_l4len_idx)
2627{
2628 struct ixgbe_adv_tx_context_desc *context_desc;
2629 u16 i = tx_ring->next_to_use;
2630
2631 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
2632
2633 i++;
2634 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2635
2636 /* set bits to identify this as an advanced context descriptor */
2637 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2638
2639 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2640 context_desc->seqnum_seed = 0;
2641 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
2642 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2643}
2644
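/**
 * ixgbevf_tso - prepare a TSO context descriptor
 * @tx_ring: Tx ring the packet will be placed on
 * @skb: buffer to be transmitted
 * @tx_flags: Tx flags accumulated so far
 * @hdr_len: returns the total header length (L2 + L3 + L4)
 *
 * Zero the IP length/checksum fields, seed the TCP pseudo-header
 * checksum and write a context descriptor carrying the MSS and header
 * lengths.  Returns 1 if a context descriptor was written, 0 if the
 * frame is not GSO, or a negative errno on failure.
 **/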
2645static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
Greg Rose92915f72010-01-09 02:24:10 +00002646 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2647{
Alexander Duyck70a10e22012-05-11 08:33:21 +00002648 u32 vlan_macip_lens, type_tucmd;
Greg Rose92915f72010-01-09 02:24:10 +00002649 u32 mss_l4len_idx, l4len;
2650
Alexander Duyck70a10e22012-05-11 08:33:21 +00002651 if (!skb_is_gso(skb))
2652 return 0;
Greg Rose92915f72010-01-09 02:24:10 +00002653
Alexander Duyck70a10e22012-05-11 08:33:21 +00002654 if (skb_header_cloned(skb)) {
2655 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2656 if (err)
2657 return err;
Greg Rose92915f72010-01-09 02:24:10 +00002658 }
2659
Alexander Duyck70a10e22012-05-11 08:33:21 +00002660 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2661 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2662
2663 if (skb->protocol == htons(ETH_P_IP)) {
2664 struct iphdr *iph = ip_hdr(skb);
2665 iph->tot_len = 0;
2666 iph->check = 0;
2667 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2668 iph->daddr, 0,
2669 IPPROTO_TCP,
2670 0);
2671 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2672 } else if (skb_is_gso_v6(skb)) {
2673 ipv6_hdr(skb)->payload_len = 0;
2674 tcp_hdr(skb)->check =
2675 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2676 &ipv6_hdr(skb)->daddr,
2677 0, IPPROTO_TCP, 0);
2678 }
2679
2680 /* compute header lengths */
2681 l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;
2684
2685 /* mss_l4len_id: use 1 as index for TSO */
2686 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2687 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2688 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2689
2690 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2691 vlan_macip_lens = skb_network_header_len(skb);
2692 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2693 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2694
2695 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2696 type_tucmd, mss_l4len_idx);
2697
2698 return 1;
Greg Rose92915f72010-01-09 02:24:10 +00002699}
2700
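/**
 * ixgbevf_tx_csum - prepare a checksum-offload context descriptor
 * @tx_ring: Tx ring the packet will be placed on
 * @skb: buffer to be transmitted
 * @tx_flags: Tx flags accumulated so far
 *
 * Build the TUCMD/MACLEN fields for TCP, SCTP or UDP checksum offload
 * and write the context descriptor.  Returns true if the hardware needs
 * to insert a checksum (CHECKSUM_PARTIAL), false otherwise.
 **/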
Alexander Duyck70a10e22012-05-11 08:33:21 +00002701static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
Greg Rose92915f72010-01-09 02:24:10 +00002702 struct sk_buff *skb, u32 tx_flags)
2703{
Alexander Duyck70a10e22012-05-11 08:33:21 +00002704 u32 vlan_macip_lens = 0;
2705 u32 mss_l4len_idx = 0;
2706 u32 type_tucmd = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002707
Alexander Duyck70a10e22012-05-11 08:33:21 +00002708 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2709 u8 l4_hdr = 0;
2710 switch (skb->protocol) {
2711 case __constant_htons(ETH_P_IP):
2712 vlan_macip_lens |= skb_network_header_len(skb);
2713 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2714 l4_hdr = ip_hdr(skb)->protocol;
2715 break;
2716 case __constant_htons(ETH_P_IPV6):
2717 vlan_macip_lens |= skb_network_header_len(skb);
2718 l4_hdr = ipv6_hdr(skb)->nexthdr;
2719 break;
2720 default:
2721 if (unlikely(net_ratelimit())) {
2722 dev_warn(tx_ring->dev,
2723 "partial checksum but proto=%x!\n",
2724 skb->protocol);
Greg Rose92915f72010-01-09 02:24:10 +00002725 }
Alexander Duyck70a10e22012-05-11 08:33:21 +00002726 break;
Greg Rose92915f72010-01-09 02:24:10 +00002727 }
2728
Alexander Duyck70a10e22012-05-11 08:33:21 +00002729 switch (l4_hdr) {
2730 case IPPROTO_TCP:
2731 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2732 mss_l4len_idx = tcp_hdrlen(skb) <<
2733 IXGBE_ADVTXD_L4LEN_SHIFT;
2734 break;
2735 case IPPROTO_SCTP:
2736 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2737 mss_l4len_idx = sizeof(struct sctphdr) <<
2738 IXGBE_ADVTXD_L4LEN_SHIFT;
2739 break;
2740 case IPPROTO_UDP:
2741 mss_l4len_idx = sizeof(struct udphdr) <<
2742 IXGBE_ADVTXD_L4LEN_SHIFT;
2743 break;
2744 default:
2745 if (unlikely(net_ratelimit())) {
2746 dev_warn(tx_ring->dev,
2747 "partial checksum but l4 proto=%x!\n",
2748 l4_hdr);
2749 }
2750 break;
2751 }
Greg Rose92915f72010-01-09 02:24:10 +00002752 }
2753
Alexander Duyck70a10e22012-05-11 08:33:21 +00002754 /* vlan_macip_lens: MACLEN, VLAN tag */
2755 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2756 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2757
2758 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2759 type_tucmd, mss_l4len_idx);
2760
2761 return (skb->ip_summed == CHECKSUM_PARTIAL);
Greg Rose92915f72010-01-09 02:24:10 +00002762}
2763
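/**
 * ixgbevf_tx_map - map the skb data and fragments for DMA
 * @tx_ring: Tx ring the packet will be placed on
 * @skb: buffer to be transmitted
 * @tx_flags: Tx flags accumulated so far
 * @first: index of the first tx_buffer_info used for this packet
 *
 * DMA-map the linear part and each page fragment of the skb, splitting
 * them into chunks of at most IXGBE_MAX_DATA_PER_TXD.  On a mapping
 * failure everything mapped so far is unwound.  Returns the number of
 * buffers used.
 **/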
Alexander Duyck70a10e22012-05-11 08:33:21 +00002764static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
Greg Rose92915f72010-01-09 02:24:10 +00002765 struct sk_buff *skb, u32 tx_flags,
2766 unsigned int first)
2767{
Greg Rose92915f72010-01-09 02:24:10 +00002768 struct ixgbevf_tx_buffer *tx_buffer_info;
2769 unsigned int len;
2770 unsigned int total = skb->len;
Kulikov Vasiliy2540ddb2010-07-15 08:45:57 +00002771 unsigned int offset = 0, size;
2772 int count = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002773 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2774 unsigned int f;
Greg Rose65deeed2010-03-24 09:35:42 +00002775 int i;
Greg Rose92915f72010-01-09 02:24:10 +00002776
2777 i = tx_ring->next_to_use;
2778
2779 len = min(skb_headlen(skb), total);
2780 while (len) {
2781 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2782 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2783
2784 tx_buffer_info->length = size;
2785 tx_buffer_info->mapped_as_page = false;
Alexander Duyck70a10e22012-05-11 08:33:21 +00002786 tx_buffer_info->dma = dma_map_single(tx_ring->dev,
Greg Rose92915f72010-01-09 02:24:10 +00002787 skb->data + offset,
Nick Nunley2a1f8792010-04-27 13:10:50 +00002788 size, DMA_TO_DEVICE);
Alexander Duyck70a10e22012-05-11 08:33:21 +00002789 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
Greg Rose92915f72010-01-09 02:24:10 +00002790 goto dma_error;
Greg Rose92915f72010-01-09 02:24:10 +00002791 tx_buffer_info->next_to_watch = i;
2792
2793 len -= size;
2794 total -= size;
2795 offset += size;
2796 count++;
2797 i++;
2798 if (i == tx_ring->count)
2799 i = 0;
2800 }
2801
2802 for (f = 0; f < nr_frags; f++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +00002803 const struct skb_frag_struct *frag;
Greg Rose92915f72010-01-09 02:24:10 +00002804
2805 frag = &skb_shinfo(skb)->frags[f];
Eric Dumazet9e903e02011-10-18 21:00:24 +00002806 len = min((unsigned int)skb_frag_size(frag), total);
Ian Campbell877749b2011-08-29 23:18:26 +00002807 offset = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002808
2809 while (len) {
2810 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2811 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2812
2813 tx_buffer_info->length = size;
Ian Campbell877749b2011-08-29 23:18:26 +00002814 tx_buffer_info->dma =
Alexander Duyck70a10e22012-05-11 08:33:21 +00002815 skb_frag_dma_map(tx_ring->dev, frag,
Ian Campbell877749b2011-08-29 23:18:26 +00002816 offset, size, DMA_TO_DEVICE);
Alexander Duyck70a10e22012-05-11 08:33:21 +00002817 if (dma_mapping_error(tx_ring->dev,
2818 tx_buffer_info->dma))
Greg Rose92915f72010-01-09 02:24:10 +00002819 goto dma_error;
Greg Rose6132ee82012-09-21 00:14:14 +00002820 tx_buffer_info->mapped_as_page = true;
Greg Rose92915f72010-01-09 02:24:10 +00002821 tx_buffer_info->next_to_watch = i;
2822
2823 len -= size;
2824 total -= size;
2825 offset += size;
2826 count++;
2827 i++;
2828 if (i == tx_ring->count)
2829 i = 0;
2830 }
2831 if (total == 0)
2832 break;
2833 }
2834
2835 if (i == 0)
2836 i = tx_ring->count - 1;
2837 else
2838 i = i - 1;
2839 tx_ring->tx_buffer_info[i].skb = skb;
2840 tx_ring->tx_buffer_info[first].next_to_watch = i;
Alexander Duyck70a10e22012-05-11 08:33:21 +00002841 tx_ring->tx_buffer_info[first].time_stamp = jiffies;
Greg Rose92915f72010-01-09 02:24:10 +00002842
2843 return count;
2844
2845dma_error:
Alexander Duyck70a10e22012-05-11 08:33:21 +00002846 dev_err(tx_ring->dev, "TX DMA map failed\n");
Greg Rose92915f72010-01-09 02:24:10 +00002847
2848 /* clear timestamp and dma mappings for failed tx_buffer_info map */
2849 tx_buffer_info->dma = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002850 tx_buffer_info->next_to_watch = 0;
2851 count--;
2852
2853 /* clear timestamp and dma mappings for remaining portion of packet */
2854 while (count >= 0) {
2855 count--;
2856 i--;
2857 if (i < 0)
2858 i += tx_ring->count;
2859 tx_buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck70a10e22012-05-11 08:33:21 +00002860 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
Greg Rose92915f72010-01-09 02:24:10 +00002861 }
2862
2863 return count;
2864}
2865
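/**
 * ixgbevf_tx_queue - write the advanced data descriptors for a packet
 * @tx_ring: Tx ring the packet is placed on
 * @tx_flags: Tx flags accumulated for this packet
 * @count: number of mapped buffers to describe
 * @paylen: total frame length
 * @hdr_len: header length (subtracted from paylen for TSO accounting)
 *
 * Fill one data descriptor per mapped buffer and mark the last one with
 * EOP/RS so the hardware reports completion for the whole packet.
 **/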
static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
	}

	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for the case where virtual functions are running
	 */
	olinfo_status |= IXGBE_ADVTXD_CC;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	tx_ring->next_to_use = i;
}

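/**
 * __ixgbevf_maybe_stop_tx - 2nd level check for available Tx descriptors
 * @tx_ring: ring that is running low on descriptors
 * @size: number of descriptors needed
 *
 * Stops the subqueue, then re-checks in case the cleanup path freed
 * descriptors while the queue was being stopped.  Returns -EBUSY if the
 * queue must stay stopped, 0 if it could be restarted right away.
 **/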
static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);

	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

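/* Stop the queue only if fewer than @size descriptors are left. */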
static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}

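/**
 * ixgbevf_xmit_frame - transmit entry point (ndo_start_xmit)
 * @skb: socket buffer to be transmitted
 * @netdev: network interface device structure
 *
 * Drops link-local frames, makes sure enough descriptors are available for
 * the worst case, sets up VLAN/TSO/checksum offload flags, maps the buffers
 * and queues the descriptors before bumping the tail register.
 **/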
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = &adapter->tx_ring[r_idx];

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	first = tx_ring->next_to_use;

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
	else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbevf_tx_queue(tx_ring, tx_flags,
			 ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
			 skb->len, hdr_len);
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}

/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	return 0;
}

/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	default:
		if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}

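/**
 * ixgbevf_suspend - detach and power down the interface
 * @pdev: PCI device information struct
 * @state: requested power state (unused, kept for the callback signature)
 *
 * Brings the adapter down, releases its Tx/Rx resources and interrupt
 * scheme and disables the PCI device.  Also reused by ixgbevf_shutdown.
 **/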
static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
		rtnl_unlock();
	}

	ixgbevf_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
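/**
 * ixgbevf_resume - restore the interface after a suspend
 * @pdev: PCI device information struct
 *
 * Re-enables the PCI device, restores its config space, rebuilds the
 * interrupt scheme, resets the VF and reopens the interface if it was
 * running before the suspend.
 **/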
static int ixgbevf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
		return err;
	}

	ixgbevf_reset(adapter);

	if (netif_running(netdev)) {
		err = ixgbevf_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
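/* PCI shutdown hook - reuse the suspend path to quiesce the device */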
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(pdev, PMSG_SUSPEND);
}

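/**
 * ixgbevf_get_stats - ndo_get_stats64 callback
 * @netdev: network interface device structure
 * @stats: storage for the 64 bit statistics
 *
 * Refreshes the adapter counters and folds the per-ring byte and packet
 * totals into @stats, using the u64 stats sync primitives so 32 bit
 * readers see consistent values.
 **/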
static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = &adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = &adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}

static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
};

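/* Attach the net_device_ops, ethtool ops and Tx watchdog timeout */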
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}

/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbevf_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 */
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the ixgbevf_resume routine.
 */
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}

/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};

static struct pci_driver ixgbevf_driver = {
	.name     = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe    = ixgbevf_probe,
	.remove   = __devexit_p(ixgbevf_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = ixgbevf_suspend,
	.resume   = ixgbevf_resume,
#endif
	.shutdown = ixgbevf_shutdown,
	.err_handler = &ixgbevf_err_handler
};

/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);

/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */