/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf] = &ixgbevf_X540_vf_info,
	[board_X550_vf] = &ixgbevf_X550_vf_info,
	[board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
{
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
		schedule_work(&adapter->service_task);
}

static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
}

/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

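/* Surprise-removal handling: once the device is gone, MMIO reads return
 * all ones (IXGBE_FAILED_READ_REG).  ixgbevf_read_reg() screens every read
 * for that pattern and, via ixgbevf_check_remove(), confirms against the
 * VFSTATUS register before tearing down the mapping by clearing hw->hw_addr.
 */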
static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	dev_err(&adapter->pdev->dev, "Adapter removed\n");
	if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		ixgbevf_service_event_schedule(adapter);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}

u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value;

	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* Tx or Rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
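		/* Each VTIVAR register holds the Rx and Tx causes for two
		 * queues as four 8-bit entries (layout implied by the index
		 * arithmetic below):
		 *   bits  7:0  - Rx cause, even queue
		 *   bits 15:8  - Tx cause, even queue
		 *   bits 23:16 - Rx cause, odd queue
		 *   bits 31:24 - Tx cause, odd queue
		 * e.g. queue 3, Tx (direction 1): index = 16 * 1 + 8 * 1 = 24
		 */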
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(tx_ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
	return ring->stats.packets;
}

static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));

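	/* TDH and TDT wrap at ring->count, so add the ring size back in
	 * when the tail has already wrapped past the head
	 */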
	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
{
	u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
					&tx_ring->state);
	}
	/* reset the countdown */
	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);

	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;

	return false;
}

static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
		adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
		ixgbevf_service_event_schedule(adapter);
	}
}

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_tx_timeout_reset(adapter);
}

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
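	/* budget caps the number of frames completed per invocation at
	 * half the ring size, so one Tx cleanup pass cannot run unbounded
	 */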
	unsigned int budget = tx_ring->count / 2;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
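	/* bias the index negative (i runs from -count up to 0) so the
	 * wrap point can be detected below with a cheap !i test
	 */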
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
		struct ixgbe_hw *hw = &adapter->hw;
		union ixgbe_adv_tx_desc *eop_desc;

		eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;

		pr_err("Detected Tx Unit Hang\n"
		       "  Tx Queue             <%d>\n"
		       "  TDH, TDT             <%x>, <%x>\n"
		       "  next_to_use          <%x>\n"
		       "  next_to_clean        <%x>\n"
		       "tx_buffer_info[next_to_clean]\n"
		       "  next_to_watch        <%p>\n"
		       "  eop_desc->wb.status  <%x>\n"
		       "  time_stamp           <%lx>\n"
		       "  jiffies              <%lx>\n",
		       tx_ring->queue_index,
		       IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
		       IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
		       tx_ring->next_to_use, i,
		       eop_desc, (eop_desc ? eop_desc->wb.status : 0),
		       tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbevf_tx_timeout_reset(adapter);

		return true;
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	skb_mark_napi_id(skb, &q_vector->napi);

	if (ixgbevf_qv_busy_polling(q_vector)) {
		netif_receive_skb(skb);
		/* exit early if we busy polled */
		return;
	}
#endif /* CONFIG_NET_RX_BUSY_POLL */

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/**
 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 **/
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	ixgbevf_rx_checksum(rx_ring, rx_desc, skb);

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		unsigned long *active_vlans = netdev_priv(rx_ring->netdev);

		if (test_bit(vid & VLAN_VID_MASK, active_vlans))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it returns true
 * indicating that this is in fact a non-EOP buffer.
 **/
static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
			       union ixgbe_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));

	if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	return true;
}

static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
				      struct ixgbevf_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma = bi->dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0,
			   PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_rx_buff_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
				     u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	/* nothing to do or no valid netdev defined */
	if (!cleaned_count || !rx_ring->netdev)
		return;

	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
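	/* as in the Tx clean path, bias the index negative so that the
	 * wrap point is detected with a cheap !i test
	 */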
	i -= rx_ring->count;

	do {
		if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if pkt_addr didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		ixgbevf_write_tail(rx_ring, i);
	}
}

/**
 * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbevf specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 **/
static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
			      struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;

	/* it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lowmem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}

/**
 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
				    union ixgbe_adv_rx_desc *rx_desc,
				    struct sk_buff *skb)
{
	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbevf_test_staterr(rx_desc,
					  IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		ixgbevf_pull_tail(rx_ring, skb);

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff)
{
	struct ixgbevf_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->page = old_buff->page;
	new_buff->dma = old_buff->dma;
	new_buff->page_offset = old_buff->page_offset;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
					 new_buff->page_offset,
					 IXGBEVF_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}

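/* a page is treated as "reserved" (not reusable) when it sits on a remote
 * NUMA node, where reuse would keep DMA traffic crossing nodes, or when it
 * came from the pfmemalloc emergency reserve and must go back to the
 * allocator promptly
 */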
static inline bool ixgbevf_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

/**
 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
				struct ixgbevf_rx_buffer *rx_buffer,
				union ixgbe_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif

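	/* small frames are copied straight into the skb linear area so the
	 * half-page buffer (and its DMA mapping) can be recycled immediately
	 */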
	if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
		unsigned char *va = page_address(page) + rx_buffer->page_offset;

		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as is */
		if (likely(!ixgbevf_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		put_page(page);
		return false;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buffer->page_offset, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(ixgbevf_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;

#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
		return false;

#endif
	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	atomic_inc(&page->_count);

	return true;
}

static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
					       union ixgbe_adv_rx_desc *rx_desc,
					       struct sk_buff *skb)
{
	struct ixgbevf_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IXGBEVF_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      IXGBEVF_RX_BUFSZ,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buffer->dma = 0;
	rx_buffer->page = NULL;

	return skb;
}


static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
	struct sk_buff *skb = rx_ring->skb;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		/* retrieve a buffer from the ring */
		skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (ixgbevf_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type == PACKET_BROADCAST ||
		     skb->pkt_type == PACKET_MULTICAST) &&
		    ether_addr_equal(rx_ring->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		/* populate checksum, VLAN, and protocol */
		ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);

		ixgbevf_rx_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!ixgbevf_qv_lock_napi(q_vector))
		return budget;
#endif

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
							per_ring_budget)
				   < per_ring_budget);

#ifdef CONFIG_NET_RX_BUSY_POLL
	ixgbevf_qv_unlock_napi(q_vector);
#endif

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/* set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int found = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbevf_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
		if (found)
			ring->stats.cleaned += found;
		else
			ring->stats.misses++;
#endif
		if (found)
			break;
	}

	ixgbevf_qv_unlock_poll(q_vector);

	return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;

		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* Tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* Rx or Rx/Tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttle rate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
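		/* the smoothing runs on the interrupt rate (1/itr):
		 * rate' = 0.9 * old_rate + 0.1 * target_rate, which in itr
		 * terms works out to 10*new*old / (9*new + old)
		 */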
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}

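/**
 * ixgbevf_msix_other - msix other interrupt handler
 * @irq: interrupt number, unused
 * @data: pointer to our adapter struct
 *
 * Handles the "other causes" vector: notes that link status may have
 * changed and schedules the service task to follow up with the PF.
 **/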
Alexander Duyck4b2cd272012-08-02 01:16:59 +00001265static irqreturn_t ixgbevf_msix_other(int irq, void *data)
Greg Rose92915f72010-01-09 02:24:10 +00001266{
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001267 struct ixgbevf_adapter *adapter = data;
Greg Rose92915f72010-01-09 02:24:10 +00001268 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose92915f72010-01-09 02:24:10 +00001269
Alexander Duyck4b2cd272012-08-02 01:16:59 +00001270 hw->mac.get_link_status = 1;
Greg Rose375b27c2012-01-18 22:13:31 +00001271
Emil Tantilov9ac5c5c2015-01-28 03:21:34 +00001272 ixgbevf_service_event_schedule(adapter);
Greg Rose3a2c4032012-02-01 01:28:15 +00001273
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001274 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
1275
Greg Rose92915f72010-01-09 02:24:10 +00001276 return IRQ_HANDLED;
1277}
1278
Greg Rose92915f72010-01-09 02:24:10 +00001279/**
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001280 * ixgbevf_msix_clean_rings - single unshared vector Rx/Tx clean (all queues)
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001280 * ixgbevf_msix_clean_rings - single unshared vector Rx/Tx clean (all queues)
Greg Rose92915f72010-01-09 02:24:10 +00001281 * @irq: unused
1282 * @data: pointer to our q_vector struct for this interrupt vector
1283 **/
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001284static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
Greg Rose92915f72010-01-09 02:24:10 +00001285{
1286 struct ixgbevf_q_vector *q_vector = data;
Greg Rose92915f72010-01-09 02:24:10 +00001287
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001288 /* EIAM disabled interrupts (on this vector) for us */
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001289 if (q_vector->rx.ring || q_vector->tx.ring)
1290 napi_schedule(&q_vector->napi);
Greg Rose92915f72010-01-09 02:24:10 +00001291
1292 return IRQ_HANDLED;
1293}
1294
1295static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
1296 int r_idx)
1297{
1298 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1299
Don Skidmore87e70ab2014-01-16 02:30:08 -08001300 a->rx_ring[r_idx]->next = q_vector->rx.ring;
1301 q_vector->rx.ring = a->rx_ring[r_idx];
Alexander Duyck6b43c442012-05-11 08:32:45 +00001302 q_vector->rx.count++;
Greg Rose92915f72010-01-09 02:24:10 +00001303}
1304
1305static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
1306 int t_idx)
1307{
1308 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1309
Don Skidmore87e70ab2014-01-16 02:30:08 -08001310 a->tx_ring[t_idx]->next = q_vector->tx.ring;
1311 q_vector->tx.ring = a->tx_ring[t_idx];
Alexander Duyck6b43c442012-05-11 08:32:45 +00001312 q_vector->tx.count++;
Greg Rose92915f72010-01-09 02:24:10 +00001313}
1314
1315/**
1316 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
1317 * @adapter: board private structure to initialize
1318 *
1319 * This function maps descriptor rings to the queue-specific vectors
1320 * we were allotted through the MSI-X enabling code. Ideally, we'd have
1321 * one vector per ring/queue, but on a constrained vector budget, we
1322 * group the rings as "efficiently" as possible. You would add new
1323 * mapping configurations in here.
1324 **/
1325static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
1326{
1327 int q_vectors;
1328 int v_start = 0;
1329 int rxr_idx = 0, txr_idx = 0;
1330 int rxr_remaining = adapter->num_rx_queues;
1331 int txr_remaining = adapter->num_tx_queues;
1332 int i, j;
1333 int rqpv, tqpv;
1334 int err = 0;
1335
1336 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1337
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001338 /* The ideal configuration...
Greg Rose92915f72010-01-09 02:24:10 +00001339 * We have enough vectors to map one per queue.
1340 */
1341 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
1342 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1343 map_vector_to_rxq(adapter, v_start, rxr_idx);
1344
1345 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
1346 map_vector_to_txq(adapter, v_start, txr_idx);
1347 goto out;
1348 }
1349
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001350 /* If we don't have enough vectors for a 1-to-1
Greg Rose92915f72010-01-09 02:24:10 +00001351 * mapping, we'll have to group them so there are
1352 * multiple queues per vector.
1353 */
1354 /* Re-adjusting *qpv takes care of the remainder. */
1355 for (i = v_start; i < q_vectors; i++) {
1356 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
1357 for (j = 0; j < rqpv; j++) {
1358 map_vector_to_rxq(adapter, i, rxr_idx);
1359 rxr_idx++;
1360 rxr_remaining--;
1361 }
1362 }
1363 for (i = v_start; i < q_vectors; i++) {
1364 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
1365 for (j = 0; j < tqpv; j++) {
1366 map_vector_to_txq(adapter, i, txr_idx);
1367 txr_idx++;
1368 txr_remaining--;
1369 }
1370 }
1371
1372out:
1373 return err;
1374}
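When vectors are scarce, the key trick above is recomputing DIV_ROUND_UP against the *remaining* vector count on each pass, which spreads a non-divisible ring count as evenly as possible. A stand-alone sketch (hypothetical names) with 8 Rx rings over 3 vectors yields a 3/3/2 split:

#include <stdio.h>

#define DEMO_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int q_vectors = 3, rxr_remaining = 8, rxr_idx = 0;
	int i, j, rqpv;

	for (i = 0; i < q_vectors; i++) {
		/* ceil(remaining rings / remaining vectors) */
		rqpv = DEMO_DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			printf("vector %d <- rx ring %d\n", i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	return 0;
}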
1375
1376/**
1377 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1378 * @adapter: board private structure
1379 *
1380 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1381 * interrupts from the kernel.
1382 **/
1383static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1384{
1385 struct net_device *netdev = adapter->netdev;
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001386 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1387 int vector, err;
Greg Rose92915f72010-01-09 02:24:10 +00001388 int ri = 0, ti = 0;
1389
Greg Rose92915f72010-01-09 02:24:10 +00001390 for (vector = 0; vector < q_vectors; vector++) {
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001391 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1392 struct msix_entry *entry = &adapter->msix_entries[vector];
Greg Rose92915f72010-01-09 02:24:10 +00001393
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001394 if (q_vector->tx.ring && q_vector->rx.ring) {
1395 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1396 "%s-%s-%d", netdev->name, "TxRx", ri++);
1397 ti++;
1398 } else if (q_vector->rx.ring) {
1399 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1400 "%s-%s-%d", netdev->name, "rx", ri++);
1401 } else if (q_vector->tx.ring) {
1402 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1403 "%s-%s-%d", netdev->name, "tx", ti++);
Greg Rose92915f72010-01-09 02:24:10 +00001404 } else {
1405 /* skip this unused q_vector */
1406 continue;
1407 }
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001408 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1409 q_vector->name, q_vector);
Greg Rose92915f72010-01-09 02:24:10 +00001410 if (err) {
1411 hw_dbg(&adapter->hw,
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001412 "request_irq failed for MSI-X interrupt, Error: %d\n",
1413 err);
Greg Rose92915f72010-01-09 02:24:10 +00001414 goto free_queue_irqs;
1415 }
1416 }
1417
Greg Rose92915f72010-01-09 02:24:10 +00001418 err = request_irq(adapter->msix_entries[vector].vector,
Alexander Duyck4b2cd272012-08-02 01:16:59 +00001419 &ixgbevf_msix_other, 0, netdev->name, adapter);
Greg Rose92915f72010-01-09 02:24:10 +00001420 if (err) {
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001421 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
1422 err);
Greg Rose92915f72010-01-09 02:24:10 +00001423 goto free_queue_irqs;
1424 }
1425
1426 return 0;
1427
1428free_queue_irqs:
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001429 while (vector) {
1430 vector--;
1431 free_irq(adapter->msix_entries[vector].vector,
1432 adapter->q_vector[vector]);
1433 }
xunleera1f6c6b2013-03-05 07:44:20 +00001434 /* This failure is non-recoverable - it indicates the system is
1435 * out of MSI-X vector resources and the VF driver cannot run
1436 * without them. Set the number of MSI-X vectors to zero
1437 * indicating that not enough can be allocated. The error
1438 * will be returned to the user indicating device open failed.
1439 * Any further attempts to force the driver to open will also
1440 * fail. The only way to recover is to unload the driver and
1441 * reload it again. If the system has recovered some MSIX
1442 * vectors then it may succeed.
1443 */
1444 adapter->num_msix_vectors = 0;
Greg Rose92915f72010-01-09 02:24:10 +00001445 return err;
1446}
1447
1448static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
1449{
1450 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1451
1452 for (i = 0; i < q_vectors; i++) {
1453 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001454
Alexander Duyck6b43c442012-05-11 08:32:45 +00001455 q_vector->rx.ring = NULL;
1456 q_vector->tx.ring = NULL;
1457 q_vector->rx.count = 0;
1458 q_vector->tx.count = 0;
Greg Rose92915f72010-01-09 02:24:10 +00001459 }
1460}
1461
1462/**
1463 * ixgbevf_request_irq - initialize interrupts
1464 * @adapter: board private structure
1465 *
1466 * Attempts to configure interrupts using the best available
1467 * capabilities of the hardware and kernel.
1468 **/
1469static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1470{
1471 int err = 0;
1472
1473 err = ixgbevf_request_msix_irqs(adapter);
1474
1475 if (err)
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001476 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
Greg Rose92915f72010-01-09 02:24:10 +00001477
1478 return err;
1479}
1480
1481static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1482{
Greg Rose92915f72010-01-09 02:24:10 +00001483 int i, q_vectors;
1484
1485 q_vectors = adapter->num_msix_vectors;
Greg Rose92915f72010-01-09 02:24:10 +00001486 i = q_vectors - 1;
1487
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001488 free_irq(adapter->msix_entries[i].vector, adapter);
Greg Rose92915f72010-01-09 02:24:10 +00001489 i--;
1490
1491 for (; i >= 0; i--) {
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001492 /* free only the irqs that were actually requested */
1493 if (!adapter->q_vector[i]->rx.ring &&
1494 !adapter->q_vector[i]->tx.ring)
1495 continue;
1496
Greg Rose92915f72010-01-09 02:24:10 +00001497 free_irq(adapter->msix_entries[i].vector,
1498 adapter->q_vector[i]);
1499 }
1500
1501 ixgbevf_reset_q_vectors(adapter);
1502}
1503
1504/**
1505 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1506 * @adapter: board private structure
1507 **/
1508static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1509{
Greg Rose92915f72010-01-09 02:24:10 +00001510 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001511 int i;
Greg Rose92915f72010-01-09 02:24:10 +00001512
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001513 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
Greg Rose92915f72010-01-09 02:24:10 +00001514 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001515 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
Greg Rose92915f72010-01-09 02:24:10 +00001516
1517 IXGBE_WRITE_FLUSH(hw);
1518
1519 for (i = 0; i < adapter->num_msix_vectors; i++)
1520 synchronize_irq(adapter->msix_entries[i].vector);
1521}
1522
1523/**
1524 * ixgbevf_irq_enable - Enable default interrupt generation settings
1525 * @adapter: board private structure
1526 **/
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001527static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
Greg Rose92915f72010-01-09 02:24:10 +00001528{
1529 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose92915f72010-01-09 02:24:10 +00001530
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001531 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1532 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1533 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
Greg Rose92915f72010-01-09 02:24:10 +00001534}
1535
1536/**
Don Skidmorede02dec2014-01-16 02:30:09 -08001537 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1538 * @adapter: board private structure
1539 * @ring: structure containing ring specific data
1540 *
1541 * Configure the Tx descriptor ring after a reset.
1542 **/
1543static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1544 struct ixgbevf_ring *ring)
1545{
1546 struct ixgbe_hw *hw = &adapter->hw;
1547 u64 tdba = ring->dma;
1548 int wait_loop = 10;
1549 u32 txdctl = IXGBE_TXDCTL_ENABLE;
1550 u8 reg_idx = ring->reg_idx;
1551
1552 /* disable queue to avoid issues while updating state */
1553 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1554 IXGBE_WRITE_FLUSH(hw);
1555
1556 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1557 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1558 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1559 ring->count * sizeof(union ixgbe_adv_tx_desc));
1560
1561 /* disable head writeback */
1562 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1563 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1564
1565 /* enable relaxed ordering */
1566 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1567 (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1568 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1569
1570 /* reset head and tail pointers */
1571 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1572 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
Mark Rustaddbf8b0d2014-03-04 03:02:34 +00001573 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
Don Skidmorede02dec2014-01-16 02:30:09 -08001574
1575 /* reset ntu and ntc to place SW in sync with hardware */
1576 ring->next_to_clean = 0;
1577 ring->next_to_use = 0;
1578
1579 /* In order to avoid issues, WTHRESH + PTHRESH should always be equal
1580 * to or less than the number of on-chip descriptors, which is
1581 * currently 40.
1582 */
1583 txdctl |= (8 << 16); /* WTHRESH = 8 */
1584
1585 /* Setting PTHRESH to 32 both improves performance and avoids a Tx hang with DFP enabled */
1586 txdctl |= (1 << 8) | /* HTHRESH = 1 */
1587 32; /* PTHRESH = 32 */
1588
Emil Tantilove08400b2015-01-28 03:21:24 +00001589 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
1590
Don Skidmorede02dec2014-01-16 02:30:09 -08001591 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1592
1593 /* poll to verify queue is enabled */
1594 do {
1595 usleep_range(1000, 2000);
1596 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1597 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1598 if (!wait_loop)
1599 pr_err("Could not enable Tx Queue %d\n", reg_idx);
1600}
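The three Tx thresholds above are packed into a single 32-bit TXDCTL value at byte-aligned offsets: PTHRESH at bit 0, HTHRESH at bit 8, WTHRESH at bit 16. A minimal sketch of the packing, with the 7-bit field masks assumed from the register layout:

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper; the shifts match ixgbevf_configure_tx_ring(). */
static uint32_t demo_txdctl(uint32_t pthresh, uint32_t hthresh,
			    uint32_t wthresh)
{
	return (wthresh << 16) | (hthresh << 8) | pthresh;
}

int main(void)
{
	uint32_t txdctl = demo_txdctl(32, 1, 8);	/* 0x00080120 */

	assert((txdctl & 0x7f) == 32);		/* PTHRESH */
	assert(((txdctl >> 8) & 0x7f) == 1);	/* HTHRESH */
	assert(((txdctl >> 16) & 0x7f) == 8);	/* WTHRESH */
	return 0;
}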
1601
1602/**
Greg Rose92915f72010-01-09 02:24:10 +00001603 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1604 * @adapter: board private structure
1605 *
1606 * Configure the Tx unit of the MAC after a reset.
1607 **/
1608static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1609{
Don Skidmorede02dec2014-01-16 02:30:09 -08001610 u32 i;
Greg Rose92915f72010-01-09 02:24:10 +00001611
1612 /* Setup the HW Tx Head and Tail descriptor pointers */
Don Skidmorede02dec2014-01-16 02:30:09 -08001613 for (i = 0; i < adapter->num_tx_queues; i++)
1614 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
Greg Rose92915f72010-01-09 02:24:10 +00001615}
1616
1617#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1618
1619static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1620{
Greg Rose92915f72010-01-09 02:24:10 +00001621 struct ixgbe_hw *hw = &adapter->hw;
1622 u32 srrctl;
1623
Greg Rose92915f72010-01-09 02:24:10 +00001624 srrctl = IXGBE_SRRCTL_DROP_EN;
1625
Emil Tantilovbad17232014-11-21 02:57:15 +00001626 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1627 srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck77d5dfc2012-05-11 08:32:19 +00001628 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
Greg Rose92915f72010-01-09 02:24:10 +00001629
Greg Rose92915f72010-01-09 02:24:10 +00001630 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1631}
1632
Don Skidmore1bb9c632013-09-21 01:57:33 +00001633static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1634{
1635 struct ixgbe_hw *hw = &adapter->hw;
1636
1637 /* PSRTYPE must be initialized in 82599 */
1638 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1639 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1640 IXGBE_PSRTYPE_L2HDR;
1641
1642 if (adapter->num_rx_queues > 1)
1643 psrtype |= 1 << 29;
1644
1645 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1646}
1647
Don Skidmorede02dec2014-01-16 02:30:09 -08001648#define IXGBEVF_MAX_RX_DESC_POLL 10
1649static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1650 struct ixgbevf_ring *ring)
1651{
1652 struct ixgbe_hw *hw = &adapter->hw;
1653 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1654 u32 rxdctl;
1655 u8 reg_idx = ring->reg_idx;
1656
Mark Rustad26597802014-03-04 03:02:45 +00001657 if (IXGBE_REMOVED(hw->hw_addr))
1658 return;
Don Skidmorede02dec2014-01-16 02:30:09 -08001659 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1660 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1661
1662 /* write value back with RXDCTL.ENABLE bit cleared */
1663 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1664
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001665 /* the hardware may take up to 100us to really disable the Rx queue */
Don Skidmorede02dec2014-01-16 02:30:09 -08001666 do {
1667 udelay(10);
1668 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1669 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1670
1671 if (!wait_loop)
1672 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1673 reg_idx);
1674}
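The disable path above uses the driver's standard bounded-poll idiom: pre-decrement a loop counter so the hardware is polled at most wait_loop times, and treat a counter of zero as a timeout. (One quirk of the idiom: if the bit clears only on the very last poll, it is still reported as a timeout.) A user-space sketch with a fake register, all names hypothetical:

#include <stdio.h>

#define DEMO_RXDCTL_ENABLE 0x02000000	/* assumed bit position */

/* Fake register that stays enabled for the first three reads. */
static unsigned int demo_read_reg(void)
{
	static int countdown = 3;

	return countdown-- > 0 ? DEMO_RXDCTL_ENABLE : 0;
}

int main(void)
{
	int wait_loop = 10;
	unsigned int rxdctl;

	do {
		/* the driver udelay(10)s here between polls */
		rxdctl = demo_read_reg();
	} while (--wait_loop && (rxdctl & DEMO_RXDCTL_ENABLE));

	if (!wait_loop)
		fprintf(stderr, "queue did not disable while polling\n");
	else
		printf("queue disabled with %d polls to spare\n", wait_loop);
	return 0;
}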
1675
1676static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1677 struct ixgbevf_ring *ring)
1678{
1679 struct ixgbe_hw *hw = &adapter->hw;
1680 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1681 u32 rxdctl;
1682 u8 reg_idx = ring->reg_idx;
1683
Mark Rustad26597802014-03-04 03:02:45 +00001684 if (IXGBE_REMOVED(hw->hw_addr))
1685 return;
Don Skidmorede02dec2014-01-16 02:30:09 -08001686 do {
1687 usleep_range(1000, 2000);
1688 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1689 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1690
1691 if (!wait_loop)
1692 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1693 reg_idx);
1694}
1695
Emil Tantilov9295edb2014-12-06 09:19:09 +00001696static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1697{
1698 struct ixgbe_hw *hw = &adapter->hw;
1699 u32 vfmrqc = 0, vfreta = 0;
1700 u32 rss_key[10];
1701 u16 rss_i = adapter->num_rx_queues;
1702 int i, j;
1703
1704 /* Fill out hash function seeds */
1705 netdev_rss_key_fill(rss_key, sizeof(rss_key));
1706 for (i = 0; i < 10; i++)
1707 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1708
1709 /* Fill out redirection table */
1710 for (i = 0, j = 0; i < 64; i++, j++) {
1711 if (j == rss_i)
1712 j = 0;
1713 vfreta = (vfreta << 8) | (j * 0x1);
1714 if ((i & 3) == 3)
1715 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
1716 }
1717
1718 /* Perform hash on these packet types */
1719 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1720 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1721 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1722 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1723
1724 vfmrqc |= IXGBE_VFMRQC_RSSEN;
1725
1726 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1727}
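The redirection-table loop above packs four one-byte queue indices per 32-bit VFRETA register, 16 registers for 64 entries. Because each new index is shifted in from the right, the earliest entry of each group of four ends up in the most significant byte. A sketch with two RSS queues (entries alternating 0, 1) makes every register 0x00010001:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int rss_i = 2;	/* two RSS queues: entries alternate 0,1 */
	uint32_t vfreta = 0;
	unsigned int i, j;

	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == rss_i)
			j = 0;
		vfreta = (vfreta << 8) | j;
		/* every 4th entry the driver writes IXGBE_VFRETA(i >> 2);
		 * four more 8-bit shifts then flush these bytes out of the
		 * 32-bit accumulator, so no explicit reset is needed
		 */
		if ((i & 3) == 3)
			printf("VFRETA[%2u] = 0x%08x\n", i >> 2, vfreta);
	}
	return 0;
}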
1728
Don Skidmorede02dec2014-01-16 02:30:09 -08001729static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1730 struct ixgbevf_ring *ring)
1731{
1732 struct ixgbe_hw *hw = &adapter->hw;
1733 u64 rdba = ring->dma;
1734 u32 rxdctl;
1735 u8 reg_idx = ring->reg_idx;
1736
1737 /* disable queue to avoid issues while updating state */
1738 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1739 ixgbevf_disable_rx_queue(adapter, ring);
1740
1741 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1742 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1743 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1744 ring->count * sizeof(union ixgbe_adv_rx_desc));
1745
1746 /* enable relaxed ordering */
1747 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1748 IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1749
1750 /* reset head and tail pointers */
1751 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1752 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
Mark Rustaddbf8b0d2014-03-04 03:02:34 +00001753 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
Don Skidmorede02dec2014-01-16 02:30:09 -08001754
1755 /* reset ntu and ntc to place SW in sync with hardware */
1756 ring->next_to_clean = 0;
1757 ring->next_to_use = 0;
Emil Tantilovbad17232014-11-21 02:57:15 +00001758 ring->next_to_alloc = 0;
Don Skidmorede02dec2014-01-16 02:30:09 -08001759
1760 ixgbevf_configure_srrctl(adapter, reg_idx);
1761
Emil Tantilovbad17232014-11-21 02:57:15 +00001762 /* allow any size packet since we can handle overflow */
1763 rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
1764
Don Skidmorede02dec2014-01-16 02:30:09 -08001765 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1766 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1767
1768 ixgbevf_rx_desc_queue_enable(adapter, ring);
Emil Tantilov095e2612014-01-17 18:30:00 -08001769 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
Don Skidmorede02dec2014-01-16 02:30:09 -08001770}
1771
Greg Rose92915f72010-01-09 02:24:10 +00001772/**
1773 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1774 * @adapter: board private structure
1775 *
1776 * Configure the Rx unit of the MAC after a reset.
1777 **/
1778static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1779{
Don Skidmorede02dec2014-01-16 02:30:09 -08001780 int i;
Emil Tantilovbad17232014-11-21 02:57:15 +00001781 struct ixgbe_hw *hw = &adapter->hw;
1782 struct net_device *netdev = adapter->netdev;
Greg Rose92915f72010-01-09 02:24:10 +00001783
Don Skidmore1bb9c632013-09-21 01:57:33 +00001784 ixgbevf_setup_psrtype(adapter);
Emil Tantilov9295edb2014-12-06 09:19:09 +00001785 if (hw->mac.type >= ixgbe_mac_X550_vf)
1786 ixgbevf_setup_vfmrqc(adapter);
Alexander Duyckdd1fe112012-07-20 08:09:48 +00001787
Emil Tantilovbad17232014-11-21 02:57:15 +00001788 /* notify the PF of our intent to use this size of frame */
1789 ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
Greg Rose92915f72010-01-09 02:24:10 +00001790
Greg Rose92915f72010-01-09 02:24:10 +00001791 /* Setup the HW Rx Head and Tail Descriptor Pointers and
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001792 * the Base and Length of the Rx Descriptor Ring
1793 */
Don Skidmorede02dec2014-01-16 02:30:09 -08001794 for (i = 0; i < adapter->num_rx_queues; i++)
1795 ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
Greg Rose92915f72010-01-09 02:24:10 +00001796}
1797
Patrick McHardy80d5c362013-04-19 02:04:28 +00001798static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1799 __be16 proto, u16 vid)
Greg Rose92915f72010-01-09 02:24:10 +00001800{
1801 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1802 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001803 int err;
1804
John Fastabend55fdd45b2012-10-01 14:52:20 +00001805 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001806
Greg Rose92915f72010-01-09 02:24:10 +00001807 /* add VID to filter table */
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001808 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001809
John Fastabend55fdd45b2012-10-01 14:52:20 +00001810 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001811
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001812 /* translate error return types so error makes sense */
1813 if (err == IXGBE_ERR_MBX)
1814 return -EIO;
1815
1816 if (err == IXGBE_ERR_INVALID_ARGUMENT)
1817 return -EACCES;
1818
Jiri Pirkodadcd652011-07-21 03:25:09 +00001819 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05001820
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001821 return err;
Greg Rose92915f72010-01-09 02:24:10 +00001822}
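The translation at the end of the function above maps the driver's mailbox error space onto standard errno values the network stack understands: a failed mailbox transaction becomes -EIO, while a PF that rejects the VID becomes -EACCES. A trivial sketch with stand-in error codes (the real IXGBE_ERR_* values live in the driver headers):

#include <errno.h>
#include <stdio.h>

#define DEMO_ERR_MBX			-100	/* stand-in value */
#define DEMO_ERR_INVALID_ARGUMENT	-101	/* stand-in value */

static int demo_translate_vfta_err(int err)
{
	if (err == DEMO_ERR_MBX)
		return -EIO;	/* the mailbox transaction itself failed */
	if (err == DEMO_ERR_INVALID_ARGUMENT)
		return -EACCES;	/* the PF refused the request */
	return err;		/* 0 on success */
}

int main(void)
{
	printf("%d %d %d\n",
	       demo_translate_vfta_err(DEMO_ERR_MBX),
	       demo_translate_vfta_err(DEMO_ERR_INVALID_ARGUMENT),
	       demo_translate_vfta_err(0));
	return 0;
}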
1823
Patrick McHardy80d5c362013-04-19 02:04:28 +00001824static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1825 __be16 proto, u16 vid)
Greg Rose92915f72010-01-09 02:24:10 +00001826{
1827 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1828 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001829 int err = -EOPNOTSUPP;
Greg Rose92915f72010-01-09 02:24:10 +00001830
John Fastabend55fdd45b2012-10-01 14:52:20 +00001831 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001832
Greg Rose92915f72010-01-09 02:24:10 +00001833 /* remove VID from filter table */
Greg Rose92fe0bf2012-11-02 05:50:47 +00001834 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001835
John Fastabend55fdd45b2012-10-01 14:52:20 +00001836 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001837
Jiri Pirkodadcd652011-07-21 03:25:09 +00001838 clear_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05001839
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001840 return err;
Greg Rose92915f72010-01-09 02:24:10 +00001841}
1842
1843static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1844{
Jiri Pirkodadcd652011-07-21 03:25:09 +00001845 u16 vid;
Greg Rose92915f72010-01-09 02:24:10 +00001846
Jiri Pirkodadcd652011-07-21 03:25:09 +00001847 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
Patrick McHardy80d5c362013-04-19 02:04:28 +00001848 ixgbevf_vlan_rx_add_vid(adapter->netdev,
1849 htons(ETH_P_8021Q), vid);
Greg Rose92915f72010-01-09 02:24:10 +00001850}
1851
Greg Rose46ec20f2011-05-13 01:33:42 +00001852static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1853{
1854 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1855 struct ixgbe_hw *hw = &adapter->hw;
1856 int count = 0;
1857
1858 if ((netdev_uc_count(netdev)) > 10) {
Jeff Kirsherdbd96362011-10-21 19:38:18 +00001859 pr_err("Too many unicast filters - No Space\n");
Greg Rose46ec20f2011-05-13 01:33:42 +00001860 return -ENOSPC;
1861 }
1862
1863 if (!netdev_uc_empty(netdev)) {
1864 struct netdev_hw_addr *ha;
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001865
Greg Rose46ec20f2011-05-13 01:33:42 +00001866 netdev_for_each_uc_addr(ha, netdev) {
1867 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1868 udelay(200);
1869 }
1870 } else {
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001871 /* If the list is empty then send a message to the PF driver to
1872 * clear all MAC VLANs on this VF.
Greg Rose46ec20f2011-05-13 01:33:42 +00001873 */
1874 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1875 }
1876
1877 return count;
1878}
1879
Greg Rose92915f72010-01-09 02:24:10 +00001880/**
Greg Rosedee847f2012-11-02 05:50:57 +00001881 * ixgbevf_set_rx_mode - Multicast and unicast set
Greg Rose92915f72010-01-09 02:24:10 +00001882 * @netdev: network interface device structure
1883 *
1884 * The set_rx_mode entry point is called whenever the multicast address
Greg Rosedee847f2012-11-02 05:50:57 +00001885 * list, unicast address list or the network interface flags are updated.
1886 * This routine is responsible for configuring the hardware for proper
1887 * multicast mode and configuring requested unicast filters.
Greg Rose92915f72010-01-09 02:24:10 +00001888 **/
1889static void ixgbevf_set_rx_mode(struct net_device *netdev)
1890{
1891 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1892 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose92915f72010-01-09 02:24:10 +00001893
John Fastabend55fdd45b2012-10-01 14:52:20 +00001894 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001895
Greg Rose92915f72010-01-09 02:24:10 +00001896 /* reprogram multicast list */
Greg Rose92fe0bf2012-11-02 05:50:47 +00001897 hw->mac.ops.update_mc_addr_list(hw, netdev);
Greg Rose46ec20f2011-05-13 01:33:42 +00001898
1899 ixgbevf_write_uc_addr_list(netdev);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001900
John Fastabend55fdd45b2012-10-01 14:52:20 +00001901 spin_unlock_bh(&adapter->mbx_lock);
Greg Rose92915f72010-01-09 02:24:10 +00001902}
1903
1904static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1905{
1906 int q_idx;
1907 struct ixgbevf_q_vector *q_vector;
1908 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1909
1910 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
Greg Rose92915f72010-01-09 02:24:10 +00001911 q_vector = adapter->q_vector[q_idx];
Jacob Kellerc777cdf2013-09-21 06:24:20 +00001912#ifdef CONFIG_NET_RX_BUSY_POLL
1913 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
1914#endif
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001915 napi_enable(&q_vector->napi);
Greg Rose92915f72010-01-09 02:24:10 +00001916 }
1917}
1918
1919static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1920{
1921 int q_idx;
1922 struct ixgbevf_q_vector *q_vector;
1923 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1924
1925 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1926 q_vector = adapter->q_vector[q_idx];
Greg Rose92915f72010-01-09 02:24:10 +00001927 napi_disable(&q_vector->napi);
Jacob Kellerc777cdf2013-09-21 06:24:20 +00001928#ifdef CONFIG_NET_RX_BUSY_POLL
1929 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
1930 pr_info("QV %d locked\n", q_idx);
1931 usleep_range(1000, 20000);
1932 }
1933#endif /* CONFIG_NET_RX_BUSY_POLL */
Greg Rose92915f72010-01-09 02:24:10 +00001934 }
1935}
1936
Don Skidmore220fe052013-09-21 01:40:49 +00001937static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1938{
1939 struct ixgbe_hw *hw = &adapter->hw;
1940 unsigned int def_q = 0;
1941 unsigned int num_tcs = 0;
Emil Tantilov2dc571a2014-12-06 09:19:02 +00001942 unsigned int num_rx_queues = adapter->num_rx_queues;
1943 unsigned int num_tx_queues = adapter->num_tx_queues;
Don Skidmore220fe052013-09-21 01:40:49 +00001944 int err;
1945
1946 spin_lock_bh(&adapter->mbx_lock);
1947
1948 /* fetch queue configuration from the PF */
1949 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1950
1951 spin_unlock_bh(&adapter->mbx_lock);
1952
1953 if (err)
1954 return err;
1955
1956 if (num_tcs > 1) {
Emil Tantilov2dc571a2014-12-06 09:19:02 +00001957 /* we need only one Tx queue */
1958 num_tx_queues = 1;
1959
Don Skidmore220fe052013-09-21 01:40:49 +00001960 /* update default Tx ring register index */
Don Skidmore87e70ab2014-01-16 02:30:08 -08001961 adapter->tx_ring[0]->reg_idx = def_q;
Don Skidmore220fe052013-09-21 01:40:49 +00001962
1963 /* we need as many queues as traffic classes */
1964 num_rx_queues = num_tcs;
1965 }
1966
1967 /* if we have a bad config abort request queue reset */
Emil Tantilov2dc571a2014-12-06 09:19:02 +00001968 if ((adapter->num_rx_queues != num_rx_queues) ||
1969 (adapter->num_tx_queues != num_tx_queues)) {
Don Skidmore220fe052013-09-21 01:40:49 +00001970 /* force mailbox timeout to prevent further messages */
1971 hw->mbx.timeout = 0;
1972
1973 /* wait for watchdog to come around and bail us out */
1974 adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
1975 }
1976
1977 return 0;
1978}
1979
Greg Rose92915f72010-01-09 02:24:10 +00001980static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1981{
Don Skidmore220fe052013-09-21 01:40:49 +00001982 ixgbevf_configure_dcb(adapter);
1983
Don Skidmorede02dec2014-01-16 02:30:09 -08001984 ixgbevf_set_rx_mode(adapter->netdev);
Greg Rose92915f72010-01-09 02:24:10 +00001985
1986 ixgbevf_restore_vlan(adapter);
1987
1988 ixgbevf_configure_tx(adapter);
1989 ixgbevf_configure_rx(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00001990}
1991
Greg Rose33bd9f62010-03-19 02:59:52 +00001992static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1993{
1994 /* Only save pre-reset stats if there are some */
1995 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1996 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1997 adapter->stats.base_vfgprc;
1998 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1999 adapter->stats.base_vfgptc;
2000 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
2001 adapter->stats.base_vfgorc;
2002 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
2003 adapter->stats.base_vfgotc;
2004 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
2005 adapter->stats.base_vfmprc;
2006 }
2007}
2008
2009static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2010{
2011 struct ixgbe_hw *hw = &adapter->hw;
2012
2013 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2014 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2015 adapter->stats.last_vfgorc |=
2016 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2017 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2018 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2019 adapter->stats.last_vfgotc |=
2020 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2021 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2022
2023 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2024 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2025 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2026 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2027 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2028}
2029
Alexander Duyck31186782012-07-20 08:09:58 +00002030static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
2031{
2032 struct ixgbe_hw *hw = &adapter->hw;
Vlad Zolotarov94cf66f2015-03-30 21:35:26 +03002033 int api[] = { ixgbe_mbox_api_12,
2034 ixgbe_mbox_api_11,
Alexander Duyck56e94092012-07-20 08:10:03 +00002035 ixgbe_mbox_api_10,
Alexander Duyck31186782012-07-20 08:09:58 +00002036 ixgbe_mbox_api_unknown };
2037 int err = 0, idx = 0;
2038
John Fastabend55fdd45b2012-10-01 14:52:20 +00002039 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck31186782012-07-20 08:09:58 +00002040
2041 while (api[idx] != ixgbe_mbox_api_unknown) {
2042 err = ixgbevf_negotiate_api_version(hw, api[idx]);
2043 if (!err)
2044 break;
2045 idx++;
2046 }
2047
John Fastabend55fdd45b2012-10-01 14:52:20 +00002048 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck31186782012-07-20 08:09:58 +00002049}
2050
Greg Rose795180d2012-04-17 04:29:34 +00002051static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
Greg Rose92915f72010-01-09 02:24:10 +00002052{
2053 struct net_device *netdev = adapter->netdev;
2054 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose92915f72010-01-09 02:24:10 +00002055
2056 ixgbevf_configure_msix(adapter);
2057
John Fastabend55fdd45b2012-10-01 14:52:20 +00002058 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00002059
Greg Rose92fe0bf2012-11-02 05:50:47 +00002060 if (is_valid_ether_addr(hw->mac.addr))
2061 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2062 else
2063 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
Greg Rose92915f72010-01-09 02:24:10 +00002064
John Fastabend55fdd45b2012-10-01 14:52:20 +00002065 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00002066
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002067 smp_mb__before_atomic();
Greg Rose92915f72010-01-09 02:24:10 +00002068 clear_bit(__IXGBEVF_DOWN, &adapter->state);
2069 ixgbevf_napi_enable_all(adapter);
2070
Emil Tantilovd9bdb572015-01-28 03:21:18 +00002071 /* clear any pending interrupts, may auto mask */
2072 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2073 ixgbevf_irq_enable(adapter);
2074
Greg Rose92915f72010-01-09 02:24:10 +00002075 /* enable transmits */
2076 netif_tx_start_all_queues(netdev);
2077
Greg Rose33bd9f62010-03-19 02:59:52 +00002078 ixgbevf_save_reset_stats(adapter);
2079 ixgbevf_init_last_counter_stats(adapter);
2080
Alexander Duyck4b2cd272012-08-02 01:16:59 +00002081 hw->mac.get_link_status = 1;
Emil Tantilov9ac5c5c2015-01-28 03:21:34 +00002082 mod_timer(&adapter->service_timer, jiffies);
Greg Rose92915f72010-01-09 02:24:10 +00002083}
2084
Greg Rose795180d2012-04-17 04:29:34 +00002085void ixgbevf_up(struct ixgbevf_adapter *adapter)
Greg Rose92915f72010-01-09 02:24:10 +00002086{
Greg Rose92915f72010-01-09 02:24:10 +00002087 ixgbevf_configure(adapter);
2088
Greg Rose795180d2012-04-17 04:29:34 +00002089 ixgbevf_up_complete(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00002090}
2091
2092/**
2093 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
Greg Rose92915f72010-01-09 02:24:10 +00002094 * @rx_ring: ring to free buffers from
2095 **/
Emil Tantilov05d063a2014-01-17 18:29:59 -08002096static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
Greg Rose92915f72010-01-09 02:24:10 +00002097{
Emil Tantilovbad17232014-11-21 02:57:15 +00002098 struct device *dev = rx_ring->dev;
Greg Rose92915f72010-01-09 02:24:10 +00002099 unsigned long size;
2100 unsigned int i;
2101
Emil Tantilovbad17232014-11-21 02:57:15 +00002102 /* Free Rx ring sk_buff */
2103 if (rx_ring->skb) {
2104 dev_kfree_skb(rx_ring->skb);
2105 rx_ring->skb = NULL;
2106 }
2107
2108 /* ring already cleared, nothing to do */
Greg Rosec0456c22010-01-22 22:47:18 +00002109 if (!rx_ring->rx_buffer_info)
2110 return;
Greg Rose92915f72010-01-09 02:24:10 +00002111
Emil Tantilovbad17232014-11-21 02:57:15 +00002112 /* Free all the Rx ring pages */
Greg Rose92915f72010-01-09 02:24:10 +00002113 for (i = 0; i < rx_ring->count; i++) {
Emil Tantilovbad17232014-11-21 02:57:15 +00002114 struct ixgbevf_rx_buffer *rx_buffer;
Greg Rose92915f72010-01-09 02:24:10 +00002115
Emil Tantilovbad17232014-11-21 02:57:15 +00002116 rx_buffer = &rx_ring->rx_buffer_info[i];
2117 if (rx_buffer->dma)
2118 dma_unmap_page(dev, rx_buffer->dma,
2119 PAGE_SIZE, DMA_FROM_DEVICE);
2120 rx_buffer->dma = 0;
2121 if (rx_buffer->page)
2122 __free_page(rx_buffer->page);
2123 rx_buffer->page = NULL;
Greg Rose92915f72010-01-09 02:24:10 +00002124 }
2125
2126 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2127 memset(rx_ring->rx_buffer_info, 0, size);
2128
2129 /* Zero out the descriptor ring */
2130 memset(rx_ring->desc, 0, rx_ring->size);
Greg Rose92915f72010-01-09 02:24:10 +00002131}
2132
2133/**
2134 * ixgbevf_clean_tx_ring - Free Tx Buffers
Greg Rose92915f72010-01-09 02:24:10 +00002135 * @tx_ring: ring to be cleaned
2136 **/
Emil Tantilov05d063a2014-01-17 18:29:59 -08002137static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
Greg Rose92915f72010-01-09 02:24:10 +00002138{
2139 struct ixgbevf_tx_buffer *tx_buffer_info;
2140 unsigned long size;
2141 unsigned int i;
2142
Greg Rosec0456c22010-01-22 22:47:18 +00002143 if (!tx_ring->tx_buffer_info)
2144 return;
2145
Greg Rose92915f72010-01-09 02:24:10 +00002146 /* Free all the Tx ring sk_buffs */
Greg Rose92915f72010-01-09 02:24:10 +00002147 for (i = 0; i < tx_ring->count; i++) {
2148 tx_buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck70a10e22012-05-11 08:33:21 +00002149 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
Greg Rose92915f72010-01-09 02:24:10 +00002150 }
2151
2152 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2153 memset(tx_ring->tx_buffer_info, 0, size);
2154
2155 memset(tx_ring->desc, 0, tx_ring->size);
Greg Rose92915f72010-01-09 02:24:10 +00002156}
2157
2158/**
2159 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2160 * @adapter: board private structure
2161 **/
2162static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2163{
2164 int i;
2165
2166 for (i = 0; i < adapter->num_rx_queues; i++)
Emil Tantilov05d063a2014-01-17 18:29:59 -08002167 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
Greg Rose92915f72010-01-09 02:24:10 +00002168}
2169
2170/**
2171 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2172 * @adapter: board private structure
2173 **/
2174static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2175{
2176 int i;
2177
2178 for (i = 0; i < adapter->num_tx_queues; i++)
Emil Tantilov05d063a2014-01-17 18:29:59 -08002179 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
Greg Rose92915f72010-01-09 02:24:10 +00002180}
2181
2182void ixgbevf_down(struct ixgbevf_adapter *adapter)
2183{
2184 struct net_device *netdev = adapter->netdev;
2185 struct ixgbe_hw *hw = &adapter->hw;
Don Skidmorede02dec2014-01-16 02:30:09 -08002186 int i;
Greg Rose92915f72010-01-09 02:24:10 +00002187
2188 /* signal that we are down to the interrupt handler */
Mark Rustad5b346dc2014-03-04 03:02:18 +00002189 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2190 return; /* do nothing if already down */
Don Skidmore858c3dd2013-10-01 04:33:50 -07002191
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00002192 /* disable all enabled Rx queues */
Don Skidmore858c3dd2013-10-01 04:33:50 -07002193 for (i = 0; i < adapter->num_rx_queues; i++)
Don Skidmore87e70ab2014-01-16 02:30:08 -08002194 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
Greg Rose92915f72010-01-09 02:24:10 +00002195
Emil Tantilovd9bdb572015-01-28 03:21:18 +00002196 usleep_range(10000, 20000);
Greg Rose92915f72010-01-09 02:24:10 +00002197
2198 netif_tx_stop_all_queues(netdev);
2199
Emil Tantilovd9bdb572015-01-28 03:21:18 +00002200 /* call carrier off first to avoid false dev_watchdog timeouts */
2201 netif_carrier_off(netdev);
2202 netif_tx_disable(netdev);
2203
Greg Rose92915f72010-01-09 02:24:10 +00002204 ixgbevf_irq_disable(adapter);
2205
2206 ixgbevf_napi_disable_all(adapter);
2207
Emil Tantilov9ac5c5c2015-01-28 03:21:34 +00002208 del_timer_sync(&adapter->service_timer);
Greg Rose92915f72010-01-09 02:24:10 +00002209
2210 /* disable transmits in the hardware now that interrupts are off */
2211 for (i = 0; i < adapter->num_tx_queues; i++) {
Don Skidmorede02dec2014-01-16 02:30:09 -08002212 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2213
2214 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2215 IXGBE_TXDCTL_SWFLSH);
Greg Rose92915f72010-01-09 02:24:10 +00002216 }
2217
Greg Rose92915f72010-01-09 02:24:10 +00002218 if (!pci_channel_offline(adapter->pdev))
2219 ixgbevf_reset(adapter);
2220
2221 ixgbevf_clean_all_tx_rings(adapter);
2222 ixgbevf_clean_all_rx_rings(adapter);
2223}
2224
2225void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2226{
2227 WARN_ON(in_interrupt());
Greg Rosec0456c22010-01-22 22:47:18 +00002228
Greg Rose92915f72010-01-09 02:24:10 +00002229 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2230 msleep(1);
2231
Alexander Duyck4b2cd272012-08-02 01:16:59 +00002232 ixgbevf_down(adapter);
2233 ixgbevf_up(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00002234
2235 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2236}
2237
2238void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2239{
2240 struct ixgbe_hw *hw = &adapter->hw;
2241 struct net_device *netdev = adapter->netdev;
2242
Don Skidmore798e3812013-10-01 04:33:51 -07002243 if (hw->mac.ops.reset_hw(hw)) {
Greg Rose92915f72010-01-09 02:24:10 +00002244 hw_dbg(hw, "PF still resetting\n");
Don Skidmore798e3812013-10-01 04:33:51 -07002245 } else {
Greg Rose92915f72010-01-09 02:24:10 +00002246 hw->mac.ops.init_hw(hw);
Don Skidmore798e3812013-10-01 04:33:51 -07002247 ixgbevf_negotiate_api(adapter);
2248 }
Greg Rose92915f72010-01-09 02:24:10 +00002249
2250 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2251 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
2252 netdev->addr_len);
2253 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
2254 netdev->addr_len);
2255 }
Emil Tantilove66c92a2015-01-28 03:21:29 +00002256
2257 adapter->last_reset = jiffies;
Greg Rose92915f72010-01-09 02:24:10 +00002258}
2259
Jakub Kicinskie45dd5f2012-11-13 04:03:16 +00002260static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2261 int vectors)
Greg Rose92915f72010-01-09 02:24:10 +00002262{
Emil Tantilova5f93372012-11-13 04:03:17 +00002263 int vector_threshold;
Greg Rose92915f72010-01-09 02:24:10 +00002264
Alexander Duyckfa71ae22012-05-11 08:32:50 +00002265 /* We'll want at least 2 (vector_threshold):
2266 * 1) TxQ[0] + RxQ[0] handler
2267 * 2) Other (Link Status Change, etc.)
Greg Rose92915f72010-01-09 02:24:10 +00002268 */
2269 vector_threshold = MIN_MSIX_COUNT;
2270
2271 /* The more we get, the more we will assign to Tx/Rx Cleanup
2272 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2273 * Right now, we simply care about how many we'll get; we'll
2274 * set them up later while requesting irq's.
2275 */
Alexander Gordeev5c1e35882014-02-18 11:11:46 +01002276 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2277 vector_threshold, vectors);
Greg Rose92915f72010-01-09 02:24:10 +00002278
Alexander Gordeev5c1e35882014-02-18 11:11:46 +01002279 if (vectors < 0) {
Jakub Kicinskie45dd5f2012-11-13 04:03:16 +00002280 dev_err(&adapter->pdev->dev,
2281 "Unable to allocate MSI-X interrupts\n");
Greg Rose92915f72010-01-09 02:24:10 +00002282 kfree(adapter->msix_entries);
2283 adapter->msix_entries = NULL;
Alexander Gordeev5c1e35882014-02-18 11:11:46 +01002284 return vectors;
Greg Rose92915f72010-01-09 02:24:10 +00002285 }
Greg Rosedee847f2012-11-02 05:50:57 +00002286
Alexander Gordeev5c1e35882014-02-18 11:11:46 +01002287 /* Adjust for only the vectors we'll use, which is minimum
2288 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2289 * vectors we were allocated.
2290 */
2291 adapter->num_msix_vectors = vectors;
2292
2293 return 0;
Greg Rose92915f72010-01-09 02:24:10 +00002294}
2295
Ben Hutchings49ce9c22012-07-10 10:56:00 +00002296/**
2297 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
Greg Rose92915f72010-01-09 02:24:10 +00002298 * @adapter: board private structure to initialize
2299 *
2300 * This is the top level queue allocation routine. The order here is very
2301 * important, starting with the "most" number of features turned on at once,
2302 * and ending with the smallest set of features. This way large combinations
2303 * can be allocated if they're turned on, and smaller combinations are the
2304 * fallthrough conditions.
2305 *
2306 **/
2307static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2308{
Don Skidmore220fe052013-09-21 01:40:49 +00002309 struct ixgbe_hw *hw = &adapter->hw;
2310 unsigned int def_q = 0;
2311 unsigned int num_tcs = 0;
2312 int err;
2313
Greg Rose92915f72010-01-09 02:24:10 +00002314 /* Start with base case */
2315 adapter->num_rx_queues = 1;
2316 adapter->num_tx_queues = 1;
Don Skidmore220fe052013-09-21 01:40:49 +00002317
2318 spin_lock_bh(&adapter->mbx_lock);
2319
2320 /* fetch queue configuration from the PF */
2321 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2322
2323 spin_unlock_bh(&adapter->mbx_lock);
2324
2325 if (err)
2326 return;
2327
2328 /* we need as many queues as traffic classes */
Emil Tantilov2dc571a2014-12-06 09:19:02 +00002329 if (num_tcs > 1) {
Don Skidmore220fe052013-09-21 01:40:49 +00002330 adapter->num_rx_queues = num_tcs;
Emil Tantilov2dc571a2014-12-06 09:19:02 +00002331 } else {
2332 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2333
2334 switch (hw->api_version) {
2335 case ixgbe_mbox_api_11:
Vlad Zolotarov94cf66f2015-03-30 21:35:26 +03002336 case ixgbe_mbox_api_12:
Emil Tantilov2dc571a2014-12-06 09:19:02 +00002337 adapter->num_rx_queues = rss;
2338 adapter->num_tx_queues = rss;
2339 default:
2340 break;
2341 }
2342 }
Greg Rose92915f72010-01-09 02:24:10 +00002343}
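A compact way to see the policy in ixgbevf_set_num_queues(): DCB (more than one traffic class) forces one Rx queue per class while Tx stays at a single queue, otherwise RSS sizes both queue counts to min(online CPUs, IXGBEVF_MAX_RSS_QUEUES) when the mailbox API allows it. Sketch below; the cap value of 2 is an assumption for illustration:

#include <stdio.h>

#define DEMO_MAX_RSS_QUEUES 2	/* assumed stand-in for IXGBEVF_MAX_RSS_QUEUES */

static void demo_set_num_queues(unsigned int num_tcs, unsigned int cpus,
				int api_supports_rss,
				unsigned int *rx, unsigned int *tx)
{
	*rx = 1;	/* base case: single queue pair */
	*tx = 1;

	if (num_tcs > 1) {
		*rx = num_tcs;	/* one Rx queue per traffic class */
	} else if (api_supports_rss) {
		unsigned int rss = cpus < DEMO_MAX_RSS_QUEUES ?
				   cpus : DEMO_MAX_RSS_QUEUES;

		*rx = rss;
		*tx = rss;
	}
}

int main(void)
{
	unsigned int rx, tx;

	demo_set_num_queues(0, 8, 1, &rx, &tx);
	printf("no DCB, 8 CPUs: rx=%u tx=%u\n", rx, tx);	/* 2/2 */
	demo_set_num_queues(4, 8, 1, &rx, &tx);
	printf("4 traffic classes: rx=%u tx=%u\n", rx, tx);	/* 4/1 */
	return 0;
}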
2344
2345/**
2346 * ixgbevf_alloc_queues - Allocate memory for all rings
2347 * @adapter: board private structure to initialize
2348 *
2349 * We allocate one ring per queue at run-time since we don't know the
2350 * number of queues at compile-time. The polling_netdev array is
2351 * intended for Multiqueue, but should work fine with a single queue.
2352 **/
2353static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
2354{
Don Skidmore87e70ab2014-01-16 02:30:08 -08002355 struct ixgbevf_ring *ring;
2356 int rx = 0, tx = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002357
Don Skidmore87e70ab2014-01-16 02:30:08 -08002358 for (; tx < adapter->num_tx_queues; tx++) {
2359 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2360 if (!ring)
2361 goto err_allocation;
Greg Rose92915f72010-01-09 02:24:10 +00002362
Don Skidmore87e70ab2014-01-16 02:30:08 -08002363 ring->dev = &adapter->pdev->dev;
2364 ring->netdev = adapter->netdev;
2365 ring->count = adapter->tx_ring_count;
2366 ring->queue_index = tx;
2367 ring->reg_idx = tx;
Greg Rose92915f72010-01-09 02:24:10 +00002368
Don Skidmore87e70ab2014-01-16 02:30:08 -08002369 adapter->tx_ring[tx] = ring;
Greg Rose92915f72010-01-09 02:24:10 +00002370 }
2371
Don Skidmore87e70ab2014-01-16 02:30:08 -08002372 for (; rx < adapter->num_rx_queues; rx++) {
2373 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2374 if (!ring)
2375 goto err_allocation;
2376
2377 ring->dev = &adapter->pdev->dev;
2378 ring->netdev = adapter->netdev;
2379
2380 ring->count = adapter->rx_ring_count;
2381 ring->queue_index = rx;
2382 ring->reg_idx = rx;
2383
2384 adapter->rx_ring[rx] = ring;
Greg Rose92915f72010-01-09 02:24:10 +00002385 }
2386
2387 return 0;
2388
Don Skidmore87e70ab2014-01-16 02:30:08 -08002389err_allocation:
2390 while (tx) {
2391 kfree(adapter->tx_ring[--tx]);
2392 adapter->tx_ring[tx] = NULL;
2393 }
2394
2395 while (rx) {
2396 kfree(adapter->rx_ring[--rx]);
2397 adapter->rx_ring[rx] = NULL;
2398 }
Greg Rose92915f72010-01-09 02:24:10 +00002399 return -ENOMEM;
2400}
2401
2402/**
2403 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2404 * @adapter: board private structure to initialize
2405 *
2406 * Attempt to configure the interrupts using the best available
2407 * capabilities of the hardware and the kernel.
2408 **/
2409static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2410{
Greg Rose91e2b892012-10-03 00:57:23 +00002411 struct net_device *netdev = adapter->netdev;
Greg Rose92915f72010-01-09 02:24:10 +00002412 int err = 0;
2413 int vector, v_budget;
2414
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00002415 /* It's easy to be greedy for MSI-X vectors, but it really
Greg Rose92915f72010-01-09 02:24:10 +00002416 * doesn't do us much good if we have a lot more vectors
2417 * than CPUs. So let's be conservative and only ask for
Alexander Duyckfa71ae22012-05-11 08:32:50 +00002418 * (roughly) the same number of vectors as there are CPUs.
2419 * The default is to use pairs of vectors.
Greg Rose92915f72010-01-09 02:24:10 +00002420 */
Alexander Duyckfa71ae22012-05-11 08:32:50 +00002421 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2422 v_budget = min_t(int, v_budget, num_online_cpus());
2423 v_budget += NON_Q_VECTORS;
Greg Rose92915f72010-01-09 02:24:10 +00002424
2425 /* A failure in MSI-X entry allocation isn't fatal, but it does
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00002426 * mean we disable MSI-X capabilities of the adapter.
2427 */
Greg Rose92915f72010-01-09 02:24:10 +00002428 adapter->msix_entries = kcalloc(v_budget,
2429 sizeof(struct msix_entry), GFP_KERNEL);
2430 if (!adapter->msix_entries) {
2431 err = -ENOMEM;
2432 goto out;
2433 }
2434
2435 for (vector = 0; vector < v_budget; vector++)
2436 adapter->msix_entries[vector].entry = vector;
2437
Jakub Kicinskie45dd5f2012-11-13 04:03:16 +00002438 err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
2439 if (err)
2440 goto out;
Greg Rose92915f72010-01-09 02:24:10 +00002441
Greg Rose91e2b892012-10-03 00:57:23 +00002442 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
2443 if (err)
2444 goto out;
2445
2446 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
2447
Greg Rose92915f72010-01-09 02:24:10 +00002448out:
2449 return err;
2450}
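The vector budget above works out to max(Rx queues, Tx queues), clamped to the online CPU count, plus one non-queue vector for the mailbox/other interrupt. For the common 2-queue-pair VF on a multi-core host that is three MSI-X vectors. A short sketch of the arithmetic (NON_Q_VECTORS assumed to be 1):

#include <stdio.h>

#define DEMO_NON_Q_VECTORS 1	/* assumed: one vector for mailbox/other */

static int demo_v_budget(int num_rx, int num_tx, int online_cpus)
{
	int v_budget = num_rx > num_tx ? num_rx : num_tx;

	if (v_budget > online_cpus)
		v_budget = online_cpus;
	return v_budget + DEMO_NON_Q_VECTORS;
}

int main(void)
{
	printf("%d\n", demo_v_budget(2, 2, 8));	/* 3 */
	printf("%d\n", demo_v_budget(4, 4, 2));	/* 3: CPU-clamped */
	return 0;
}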
2451
2452/**
2453 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2454 * @adapter: board private structure to initialize
2455 *
2456 * We allocate one q_vector per queue interrupt. If allocation fails we
2457 * return -ENOMEM.
2458 **/
2459static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2460{
2461 int q_idx, num_q_vectors;
2462 struct ixgbevf_q_vector *q_vector;
Greg Rose92915f72010-01-09 02:24:10 +00002463
2464 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
Greg Rose92915f72010-01-09 02:24:10 +00002465
2466 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2467 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2468 if (!q_vector)
2469 goto err_out;
2470 q_vector->adapter = adapter;
2471 q_vector->v_idx = q_idx;
Alexander Duyckfa71ae22012-05-11 08:32:50 +00002472 netif_napi_add(adapter->netdev, &q_vector->napi,
2473 ixgbevf_poll, 64);
Jacob Kellerc777cdf2013-09-21 06:24:20 +00002474#ifdef CONFIG_NET_RX_BUSY_POLL
2475 napi_hash_add(&q_vector->napi);
2476#endif
Greg Rose92915f72010-01-09 02:24:10 +00002477 adapter->q_vector[q_idx] = q_vector;
2478 }
2479
2480 return 0;
2481
2482err_out:
2483 while (q_idx) {
2484 q_idx--;
2485 q_vector = adapter->q_vector[q_idx];
Jacob Kellerc777cdf2013-09-21 06:24:20 +00002486#ifdef CONFIG_NET_RX_BUSY_POLL
2487 napi_hash_del(&q_vector->napi);
2488#endif
Greg Rose92915f72010-01-09 02:24:10 +00002489 netif_napi_del(&q_vector->napi);
2490 kfree(q_vector);
2491 adapter->q_vector[q_idx] = NULL;
2492 }
2493 return -ENOMEM;
2494}
2495
2496/**
2497 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2498 * @adapter: board private structure to initialize
2499 *
2500 * This function frees the memory allocated to the q_vectors. In addition if
2501 * NAPI is enabled it will delete any references to the NAPI struct prior
2502 * to freeing the q_vector.
2503 **/
2504static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2505{
John Fastabendf4477702012-09-16 08:19:46 +00002506 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
Greg Rose92915f72010-01-09 02:24:10 +00002507
2508 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2509 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2510
2511 adapter->q_vector[q_idx] = NULL;
Jacob Kellerc777cdf2013-09-21 06:24:20 +00002512#ifdef CONFIG_NET_RX_BUSY_POLL
2513 napi_hash_del(&q_vector->napi);
2514#endif
John Fastabendf4477702012-09-16 08:19:46 +00002515 netif_napi_del(&q_vector->napi);
Greg Rose92915f72010-01-09 02:24:10 +00002516 kfree(q_vector);
2517 }
2518}
2519
2520/**
2521 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2522 * @adapter: board private structure
2523 *
2524 **/
2525static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2526{
2527 pci_disable_msix(adapter->pdev);
2528 kfree(adapter->msix_entries);
2529 adapter->msix_entries = NULL;
Greg Rose92915f72010-01-09 02:24:10 +00002530}
2531
2532/**
2533 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2534 * @adapter: board private structure to initialize
2535 *
2536 **/
2537static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2538{
2539 int err;
2540
2541 /* Number of supported queues */
2542 ixgbevf_set_num_queues(adapter);
2543
2544 err = ixgbevf_set_interrupt_capability(adapter);
2545 if (err) {
2546 hw_dbg(&adapter->hw,
2547 "Unable to setup interrupt capabilities\n");
2548 goto err_set_interrupt;
2549 }
2550
2551 err = ixgbevf_alloc_q_vectors(adapter);
2552 if (err) {
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00002553 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
Greg Rose92915f72010-01-09 02:24:10 +00002554 goto err_alloc_q_vectors;
2555 }
2556
2557 err = ixgbevf_alloc_queues(adapter);
2558 if (err) {
Jeff Kirsherdbd96362011-10-21 19:38:18 +00002559 pr_err("Unable to allocate memory for queues\n");
Greg Rose92915f72010-01-09 02:24:10 +00002560 goto err_alloc_queues;
2561 }
2562
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00002563 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
Greg Rose92915f72010-01-09 02:24:10 +00002564 (adapter->num_rx_queues > 1) ? "Enabled" :
2565 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2566
2567 set_bit(__IXGBEVF_DOWN, &adapter->state);
2568
2569 return 0;
2570err_alloc_queues:
2571 ixgbevf_free_q_vectors(adapter);
2572err_alloc_q_vectors:
2573 ixgbevf_reset_interrupt_capability(adapter);
2574err_set_interrupt:
2575 return err;
2576}
2577
2578/**
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00002579 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2580 * @adapter: board private structure to clear interrupt scheme on
2581 *
2582 * We go through and clear interrupt specific resources and reset the structure
2583 * to pre-load conditions
2584 **/
2585static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2586{
Don Skidmore87e70ab2014-01-16 02:30:08 -08002587 int i;
2588
2589 for (i = 0; i < adapter->num_tx_queues; i++) {
2590 kfree(adapter->tx_ring[i]);
2591 adapter->tx_ring[i] = NULL;
2592 }
2593 for (i = 0; i < adapter->num_rx_queues; i++) {
2594 kfree(adapter->rx_ring[i]);
2595 adapter->rx_ring[i] = NULL;
2596 }
2597
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00002598 adapter->num_tx_queues = 0;
2599 adapter->num_rx_queues = 0;
2600
2601 ixgbevf_free_q_vectors(adapter);
2602 ixgbevf_reset_interrupt_capability(adapter);
2603}
2604
/**
 * ixgbevf_sw_init - Initialize general software structures
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int err;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);

	/* assume legacy case in which PF would only give VF 2 queues */
	hw->mac.max_tx_queues = 2;
	hw->mac.max_rx_queues = 2;

	/* lock to protect mailbox accesses */
	spin_lock_init(&adapter->mbx_lock);

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state. Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_hw failed: %d\n", err);
			goto out;
		}
		ixgbevf_negotiate_api(adapter);
		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;

out:
	return err;
}

#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
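
/* Worked rollover example (illustrative): if a 32-bit counter last
 * read 0xFFFFFFF0 and the register now reads 0x00000010, the value
 * has wrapped, so the macro adds 0x100000000 to the 64-bit software
 * counter, then keeps the accumulated upper bits and splices in the
 * fresh low 32 bits.  The 36-bit variant does the same with a
 * 0x1000000000 rollover for the split LSB/MSB register pair.
 */
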
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->hw_csum_rx_error +=
			adapter->rx_ring[i]->hw_csum_rx_error;
		adapter->rx_ring[i]->hw_csum_rx_error = 0;
	}
}

/**
 * ixgbevf_service_timer - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_service_timer(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;

	/* Reset the timer */
	mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);

	ixgbevf_service_event_schedule(adapter);
}

static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
{
	if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
		return;

	adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}

/**
 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for TX hangs.  As a result we should immediately
 * determine if a hang has occurred.
 **/
static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Force detection of hung controller */
	if (netif_carrier_ok(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_check_for_tx_hang(adapter->tx_ring[i]);
	}

	/* get one bit for every active Tx/Rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];

		if (qv->rx.ring || qv->tx.ring)
			eics |= 1 << i;
	}

	/* Cause software interrupt to ensure rings are cleaned */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
}
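
/* Writing the accumulated mask to VTEICS raises a software interrupt
 * on each active vector, so the rings get polled even if a hardware
 * interrupt was lost; a queue that still makes no progress is then
 * flagged by the Tx hang logic armed above.
 */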

/**
 * ixgbevf_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	s32 err;

	spin_lock_bh(&adapter->mbx_lock);

	err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	/* if check for link returns error we will need to reset */
	if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
		adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
		link_up = false;
	}

	adapter->link_up = link_up;
	adapter->link_speed = link_speed;
}

/**
 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
 * print link up message
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* only continue if link was previously down */
	if (netif_carrier_ok(netdev))
		return;

	dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
		 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
		 "10 Gbps" :
		 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
		 "1 Gbps" :
		 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
		 "100 Mbps" :
		 "unknown speed");

	netif_carrier_on(netdev);
}

/**
 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
 * print link down message
 * @adapter: pointer to the adapter structure
 **/
static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->link_speed = 0;

	/* only continue if link was up previously */
	if (!netif_carrier_ok(netdev))
		return;

	dev_info(&adapter->pdev->dev, "NIC Link is Down\n");

	netif_carrier_off(netdev);
}

/**
 * ixgbevf_watchdog_subtask - worker routine that updates the link state
 * @adapter: board private structure
 **/
static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
{
	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	ixgbevf_watchdog_update_link(adapter);

	if (adapter->link_up)
		ixgbevf_watchdog_link_is_up(adapter);
	else
		ixgbevf_watchdog_link_is_down(adapter);

	ixgbevf_update_stats(adapter);
}

/**
 * ixgbevf_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_service_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       service_task);
	struct ixgbe_hw *hw = &adapter->hw;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			rtnl_lock();
			ixgbevf_down(adapter);
			rtnl_unlock();
		}
		return;
	}

	ixgbevf_queue_reset_subtask(adapter);
	ixgbevf_reset_subtask(adapter);
	ixgbevf_watchdog_subtask(adapter);
	ixgbevf_check_hang_subtask(adapter);

	ixgbevf_service_event_complete(adapter);
}
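
/* Note: the subtasks above run in a fixed order - queue and MAC
 * resets first, then the link watchdog, then the hang check - so the
 * later checks always observe the rings in their post-reset state.
 */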

/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
	ixgbevf_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(tx_ring->dev, "Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
		break;
	}
	return err;
}

/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
	ixgbevf_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* A previous failure to open the device because of a lack of
	 * available MSIX vector resources may have reset the number
	 * of msix vectors variable to zero. The only way to recover
	 * is to unload/reload the driver and hope that the system has
	 * been able to recover some MSIX vector resources.
	 */
	if (!adapter->num_msix_vectors)
		return -ENOMEM;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the VF can't start.
		 */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
			goto err_setup_reset;
		}
	}

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/* Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_up_complete(adapter);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_reset(adapter);

err_setup_reset:

	return err;
}

/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}

static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;

	if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
		return;

	adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ixgbevf_close(dev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_init_interrupt_scheme(adapter);

	if (netif_running(dev))
		ixgbevf_open(dev);
}

static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,
				u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
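
/* A context descriptor consumes one ring slot but carries no packet
 * data; the TSO and checksum paths below use it to describe header
 * layout and offload parameters, and the data descriptors refer back
 * to it by index (index 1 is reserved for TSO in this driver).
 */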

static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct ixgbevf_tx_buffer *first,
		       u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (first->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM |
				   IXGBE_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM;
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update GSO size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}

static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct ixgbevf_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_hdr = 0;

		switch (first->protocol) {
		case htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);
}

static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
				      IXGBE_ADVTXD_DCMD_IFCS |
				      IXGBE_ADVTXD_DCMD_DEXT);

	/* set HW VLAN bit if VLAN is present */
	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);

	return cmd_type;
}

static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
				     u32 tx_flags, unsigned int paylen)
{
	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);

	/* enable L4 checksum for TSO and TX checksum offload */
	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);

	/* use index 1 context for TSO/FSO/FCOE */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);

	/* Check Context must be set if Tx switch is enabled, which it
	 * always is for the case where virtual functions are running
	 */
	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);

	tx_desc->read.olinfo_status = olinfo_status;
}

static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			   struct ixgbevf_tx_buffer *first,
			   const u8 hdr_len)
{
	dma_addr_t dma;
	struct sk_buff *skb = first->skb;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	u32 tx_flags = first->tx_flags;
	__le32 cmd_type;
	u16 i = tx_ring->next_to_use;

	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);

	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
	cmd_type = ixgbevf_tx_cmd_type(tx_flags);

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	dma_unmap_len_set(first, len, size);
	dma_unmap_addr_set(first, dma, dma);

	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
			tx_desc->read.olinfo_status = 0;
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer = &tx_ring->tx_buffer_info[i];
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);
		tx_desc->read.olinfo_status = 0;

		frag++;
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier (wmb) to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* notify HW of packet */
	ixgbevf_write_tail(tx_ring, i);

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 * smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available.
	 */
	if (likely(ixgbevf_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
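
/* Illustrative flow: the transmit path checks ring space before and
 * after queueing a frame.  Once stopped, the subqueue is restarted
 * from the Tx clean-up path when descriptors complete; the smp_mb()
 * in __ixgbevf_maybe_stop_tx() closes the race between stopping here
 * and a concurrent wake from the clean-up side.
 */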

static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_tx_buffer *first;
	struct ixgbevf_ring *tx_ring;
	int tso;
	u32 tx_flags = 0;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 hdr_len = 0;
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);

	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = adapter->tx_ring[skb->queue_mapping];

	/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 * + 2 desc gap to keep tail from touching head,
	 * + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
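
	/* Worked example (assuming IXGBE_MAX_DATA_PER_TXD is 16KB as in
	 * the ixgbe family): a frame with a 2KB head and three 4KB frags
	 * needs four data descriptors, so the check below must reserve
	 * 4 + 3 - the context descriptor plus the two-descriptor gap.
	 */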
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = vlan_get_protocol(skb);

	tso = ixgbevf_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbevf_tx_csum(tx_ring, first);

	ixgbevf_tx_map(tx_ring, first, hdr_len);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	return 0;
}

/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

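	/* e.g. a 1500 byte MTU gives max_frame = 1500 + 14 (ETH_HLEN) +
	 * 4 (ETH_FCS_LEN) = 1518 bytes on the wire
	 */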
	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	default:
		if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbevf_netpoll(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return;
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
		rtnl_unlock();
	}

	ixgbevf_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_restore_state(pdev);
	/* pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = ixgbevf_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(pdev, PMSG_SUSPEND);
}

static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}
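
/* The u64_stats_fetch_begin/retry loops above re-read each ring's
 * counters until the per-ring sequence count is stable, yielding a
 * consistent 64-bit snapshot even while the datapath updates the
 * counters concurrently on another CPU.
 */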

static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= ixgbevf_busy_poll_recv,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbevf_netpoll,
#endif
};

static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}

3906/**
3907 * ixgbevf_probe - Device Initialization Routine
3908 * @pdev: PCI device information struct
3909 * @ent: entry in ixgbevf_pci_tbl
3910 *
3911 * Returns 0 on success, negative on failure
3912 *
3913 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3914 * The OS initialization, configuring of the adapter private structure,
3915 * and a hardware reset occur.
3916 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	int err, pci_using_dac;
	bool disable_dev = false;

	err = pci_enable_device(pdev);
	if (err)
		return err;
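
	/* Try 64-bit DMA first and only fall back to a 32-bit mask if that
	 * fails; pci_using_dac records the outcome so NETIF_F_HIGHDMA is
	 * advertised below only when 64-bit addressing is available.
	 */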
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	adapter->io_addr = hw->hw_addr;
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	/* Setup HW API */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}
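
	/* hw_features is the set the user may later toggle via ethtool;
	 * netdev->features below is the set actually enabled at probe time.
	 */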
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_TSO |
				 NETIF_F_TSO6 |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		err = -EIO;
		goto err_sw_init;
	}
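
	/* The service timer periodically schedules service_task, which
	 * handles reset, watchdog and link-state work outside of
	 * interrupt context.
	 */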
	setup_timer(&adapter->service_timer, &ixgbevf_service_timer,
		    (unsigned long)adapter);

	INIT_WORK(&adapter->service_task, ixgbevf_service_task);
	set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	pci_set_drvdata(pdev, netdev);
	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the VF info */
	dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
	dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);

	switch (hw->mac.type) {
	case ixgbe_mac_X550_vf:
		dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
		break;
	case ixgbe_mac_X540_vf:
		dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
		break;
	case ixgbe_mac_82599_vf:
	default:
		dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
		break;
	}

	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(adapter->io_addr);
err_ioremap:
	disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}

/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter;
	bool disable_dev;

	if (!netdev)
		return;

	adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_REMOVING, &adapter->state);
	cancel_work_sync(&adapter->service_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->io_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}
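
/* Note that the teardown above undoes ixgbevf_probe() in reverse order:
 * unregister the netdev, tear down the interrupt scheme, unmap the BAR,
 * release the PCI regions and finally disable the PCI device.
 */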

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* make sure the device re-enable above is visible to other CPUs
	 * before the DISABLED bit is cleared
	 */
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation. Implementation resembles the
 * second half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}

/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected	= ixgbevf_io_error_detected,
	.slot_reset	= ixgbevf_io_slot_reset,
	.resume		= ixgbevf_io_resume,
};
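
/* On a PCI error the AER core walks these hooks in order: error_detected()
 * quiesces the VF, slot_reset() re-enables and resets it after the link is
 * re-trained, and resume() lets traffic flow again.
 */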

static struct pci_driver ixgbevf_driver = {
	.name		= ixgbevf_driver_name,
	.id_table	= ixgbevf_pci_tbl,
	.probe		= ixgbevf_probe,
	.remove		= ixgbevf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend	= ixgbevf_suspend,
	.resume		= ixgbevf_resume,
#endif
	.shutdown	= ixgbevf_shutdown,
	.err_handler	= &ixgbevf_err_handler
};

/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);

/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by the hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */