/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
        "Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
        [board_82599_vf] = &ixgbevf_82599_vf_info,
        [board_X540_vf] = &ixgbevf_X540_vf_info,
        [board_X550_vf] = &ixgbevf_X550_vf_info,
        [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
{
        if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
            !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
            !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
                schedule_work(&adapter->service_task);
}

static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
{
        BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));

        /* flush memory to make sure state is correct before next watchdog */
        smp_mb__before_atomic();
        clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
}

/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
        struct ixgbevf_adapter *adapter = hw->back;

        if (!hw->hw_addr)
                return;
        hw->hw_addr = NULL;
        dev_err(&adapter->pdev->dev, "Adapter removed\n");
        if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
                ixgbevf_service_event_schedule(adapter);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
        u32 value;

        /* The following check not only optimizes a bit by not
         * performing a read on the status register when the
         * register just read was a status register read that
         * returned IXGBE_FAILED_READ_REG. It also blocks any
         * potential recursion.
         */
        if (reg == IXGBE_VFSTATUS) {
                ixgbevf_remove_adapter(hw);
                return;
        }
        value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
        if (value == IXGBE_FAILED_READ_REG)
                ixgbevf_remove_adapter(hw);
}

u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
        u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
        u32 value;

        if (IXGBE_REMOVED(reg_addr))
                return IXGBE_FAILED_READ_REG;
        value = readl(reg_addr + reg);
        if (unlikely(value == IXGBE_FAILED_READ_REG))
                ixgbevf_check_remove(hw, reg);
        return value;
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
                             u8 queue, u8 msix_vector)
{
        u32 ivar, index;
        struct ixgbe_hw *hw = &adapter->hw;

        if (direction == -1) {
                /* other causes */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
                ivar &= ~0xFF;
                ivar |= msix_vector;
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
        } else {
                /* Tx or Rx causes */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                index = ((16 * (queue & 1)) + (8 * direction));
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
                ivar &= ~(0xFF << index);
                ivar |= (msix_vector << index);
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
        }
}
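
/* Illustrative example (editorial note, not from the original comments):
 * each VTIVAR register carries four 8-bit entries covering two queues
 * (Rx0, Tx0, Rx1, Tx1). Mapping Rx queue 5 (direction 0) to MSI-X
 * vector 2 selects VTIVAR(5 >> 1) = VTIVAR(2) at bit offset
 * 16 * (5 & 1) + 8 * 0 = 16, so bits 23:16 are written with
 * (2 | IXGBE_IVAR_ALLOC_VAL).
 */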

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
                                               struct ixgbevf_tx_buffer *tx_buffer)
{
        if (tx_buffer->skb) {
                dev_kfree_skb_any(tx_buffer->skb);
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(tx_ring->dev,
                                         dma_unmap_addr(tx_buffer, dma),
                                         dma_unmap_len(tx_buffer, len),
                                         DMA_TO_DEVICE);
        } else if (dma_unmap_len(tx_buffer, len)) {
                dma_unmap_page(tx_ring->dev,
                               dma_unmap_addr(tx_buffer, dma),
                               dma_unmap_len(tx_buffer, len),
                               DMA_TO_DEVICE);
        }
        tx_buffer->next_to_watch = NULL;
        tx_buffer->skb = NULL;
        dma_unmap_len_set(tx_buffer, len, 0);
        /* tx_buffer must be completely set up in the transmit path */
}

static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
        return ring->stats.packets;
}

static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
{
        struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
        struct ixgbe_hw *hw = &adapter->hw;

        u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
        u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));

        if (head != tail)
                return (head < tail) ?
                        tail - head : (tail + ring->count - head);

        return 0;
}
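
/* Worked example (illustrative): with ring->count = 512, head = 500 and
 * tail = 10 the ring has wrapped, so ixgbevf_get_tx_pending() reports
 * tail + count - head = 10 + 512 - 500 = 22 descriptors still pending.
 */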

static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
{
        u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
        u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
        u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);

        clear_check_for_tx_hang(tx_ring);

        /* Check for a hung queue, but be thorough. This verifies
         * that a transmit has been completed since the previous
         * check AND there is at least one packet pending. The
         * ARMED bit is set to indicate a potential hang.
         */
        if ((tx_done_old == tx_done) && tx_pending) {
                /* make sure it is true for two checks in a row */
                return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
                                        &tx_ring->state);
        }
        /* reset the countdown */
        clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);

        /* update completed stats and continue */
        tx_ring->tx_stats.tx_done_old = tx_done;

        return false;
}
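
/* Illustrative timeline: the first check that sees zero completion
 * progress with work still pending only arms __IXGBEVF_HANG_CHECK_ARMED
 * and returns false; only a second consecutive check in that state
 * reports a hang, which filters out momentary stalls.
 */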

static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
        /* Do the reset outside of interrupt context */
        if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
                adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
                ixgbevf_service_event_schedule(adapter);
        }
}

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        ixgbevf_tx_timeout_reset(adapter);
}

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
                                 struct ixgbevf_ring *tx_ring)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_tx_buffer *tx_buffer;
        union ixgbe_adv_tx_desc *tx_desc;
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int budget = tx_ring->count / 2;
        unsigned int i = tx_ring->next_to_clean;

        if (test_bit(__IXGBEVF_DOWN, &adapter->state))
                return true;

        tx_buffer = &tx_ring->tx_buffer_info[i];
        tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
        i -= tx_ring->count;

        do {
                union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

                /* if next_to_watch is not set then there is no work pending */
                if (!eop_desc)
                        break;

                /* prevent any other reads prior to eop_desc */
                read_barrier_depends();

                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
                        break;

                /* clear next_to_watch to prevent false hangs */
                tx_buffer->next_to_watch = NULL;

                /* update the statistics for this packet */
                total_bytes += tx_buffer->bytecount;
                total_packets += tx_buffer->gso_segs;

                /* free the skb */
                dev_kfree_skb_any(tx_buffer->skb);

                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
                                 dma_unmap_addr(tx_buffer, dma),
                                 dma_unmap_len(tx_buffer, len),
                                 DMA_TO_DEVICE);

                /* clear tx_buffer data */
                tx_buffer->skb = NULL;
                dma_unmap_len_set(tx_buffer, len, 0);

                /* unmap remaining buffers */
                while (tx_desc != eop_desc) {
                        tx_buffer++;
                        tx_desc++;
                        i++;
                        if (unlikely(!i)) {
                                i -= tx_ring->count;
                                tx_buffer = tx_ring->tx_buffer_info;
                                tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
                        }

                        /* unmap any remaining paged data */
                        if (dma_unmap_len(tx_buffer, len)) {
                                dma_unmap_page(tx_ring->dev,
                                               dma_unmap_addr(tx_buffer, dma),
                                               dma_unmap_len(tx_buffer, len),
                                               DMA_TO_DEVICE);
                                dma_unmap_len_set(tx_buffer, len, 0);
                        }
                }

                /* move us one more past the eop_desc for start of next pkt */
                tx_buffer++;
                tx_desc++;
                i++;
                if (unlikely(!i)) {
                        i -= tx_ring->count;
                        tx_buffer = tx_ring->tx_buffer_info;
                        tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
                }

                /* issue prefetch for next Tx descriptor */
                prefetch(tx_desc);

                /* update budget accounting */
                budget--;
        } while (likely(budget));

        i += tx_ring->count;
        tx_ring->next_to_clean = i;
        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);
        q_vector->tx.total_bytes += total_bytes;
        q_vector->tx.total_packets += total_packets;

        if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
                struct ixgbe_hw *hw = &adapter->hw;
                union ixgbe_adv_tx_desc *eop_desc;

                eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;

                pr_err("Detected Tx Unit Hang\n"
                       "  Tx Queue             <%d>\n"
                       "  TDH, TDT             <%x>, <%x>\n"
                       "  next_to_use          <%x>\n"
                       "  next_to_clean        <%x>\n"
                       "tx_buffer_info[next_to_clean]\n"
                       "  next_to_watch        <%p>\n"
                       "  eop_desc->wb.status  <%x>\n"
                       "  time_stamp           <%lx>\n"
                       "  jiffies              <%lx>\n",
                       tx_ring->queue_index,
                       IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
                       IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
                       tx_ring->next_to_use, i,
                       eop_desc, (eop_desc ? eop_desc->wb.status : 0),
                       tx_ring->tx_buffer_info[i].time_stamp, jiffies);

                netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

                /* schedule immediate reset if we believe we hung */
                ixgbevf_tx_timeout_reset(adapter);

                return true;
        }

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();

                if (__netif_subqueue_stopped(tx_ring->netdev,
                                             tx_ring->queue_index) &&
                    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
                        netif_wake_subqueue(tx_ring->netdev,
                                            tx_ring->queue_index);
                        ++tx_ring->tx_stats.restart_queue;
                }
        }

        return !!budget;
}
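
/* Editorial note: the cleanup loop above biases the index by
 * -tx_ring->count, so the ring-wrap test collapses to "if (unlikely(!i))"
 * with no compare against count; the bias is undone by
 * "i += tx_ring->count" before next_to_clean is written back.
 */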

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
                           struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
        skb_mark_napi_id(skb, &q_vector->napi);

        if (ixgbevf_qv_busy_polling(q_vector)) {
                netif_receive_skb(skb);
                /* exit early if we busy polled */
                return;
        }
#endif /* CONFIG_NET_RX_BUSY_POLL */

        napi_gro_receive(&q_vector->napi, skb);
}

#define IXGBE_RSS_L4_TYPES_MASK \
        ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
         (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
         (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
         (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))

static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
                                   union ixgbe_adv_rx_desc *rx_desc,
                                   struct sk_buff *skb)
{
        u16 rss_type;

        if (!(ring->netdev->features & NETIF_F_RXHASH))
                return;

        rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
                   IXGBE_RXDADV_RSSTYPE_MASK;

        if (!rss_type)
                return;

        skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
                     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
                     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
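
/* Illustrative example: a TCP/IPv4 packet reports rss_type
 * IXGBE_RXDADV_RSSTYPE_IPV4_TCP, which is in IXGBE_RSS_L4_TYPES_MASK, so
 * its hash is recorded as PKT_HASH_TYPE_L4; an IP-only hash type falls
 * through to PKT_HASH_TYPE_L3, and rss_type 0 records no hash at all.
 */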

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
                                       union ixgbe_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
{
        skb_checksum_none_assert(skb);

        /* Rx csum disabled */
        if (!(ring->netdev->features & NETIF_F_RXCSUM))
                return;

        /* if IP and error */
        if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
            ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
                ring->rx_stats.csum_err++;
                return;
        }

        if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
                return;

        if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
                ring->rx_stats.csum_err++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/**
 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 **/
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
                                       union ixgbe_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
{
        ixgbevf_rx_hash(rx_ring, rx_desc, skb);
        ixgbevf_rx_checksum(rx_ring, rx_desc, skb);

        if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
                u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
                unsigned long *active_vlans = netdev_priv(rx_ring->netdev);

                if (test_bit(vid & VLAN_VID_MASK, active_vlans))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
        }

        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will return true
 * indicating that this is in fact a non-EOP buffer.
 **/
static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
                               union ixgbe_adv_rx_desc *rx_desc)
{
        u32 ntc = rx_ring->next_to_clean + 1;

        /* fetch, update, and store next to clean */
        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;

        prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));

        if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
                return false;

        return true;
}

static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
                                      struct ixgbevf_rx_buffer *bi)
{
        struct page *page = bi->page;
        dma_addr_t dma = bi->dma;

        /* since we are recycling buffers we should seldom need to alloc */
        if (likely(page))
                return true;

        /* alloc new page for storage */
        page = dev_alloc_page();
        if (unlikely(!page)) {
                rx_ring->rx_stats.alloc_rx_page_failed++;
                return false;
        }

        /* map page for use */
        dma = dma_map_page(rx_ring->dev, page, 0,
                           PAGE_SIZE, DMA_FROM_DEVICE);

        /* if mapping failed free memory back to system since
         * there isn't much point in holding memory we can't use
         */
        if (dma_mapping_error(rx_ring->dev, dma)) {
                __free_page(page);

                rx_ring->rx_stats.alloc_rx_buff_failed++;
                return false;
        }

        bi->dma = dma;
        bi->page = page;
        bi->page_offset = 0;

        return true;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
                                     u16 cleaned_count)
{
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbevf_rx_buffer *bi;
        unsigned int i = rx_ring->next_to_use;

        /* nothing to do or no valid netdev defined */
        if (!cleaned_count || !rx_ring->netdev)
                return;

        rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
        bi = &rx_ring->rx_buffer_info[i];
        i -= rx_ring->count;

        do {
                if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
                        break;

                /* Refresh the desc even if pkt_addr didn't change
                 * because each write-back erases this info.
                 */
                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

                rx_desc++;
                bi++;
                i++;
                if (unlikely(!i)) {
                        rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
                        bi = rx_ring->rx_buffer_info;
                        i -= rx_ring->count;
                }

                /* clear the hdr_addr for the next_to_use descriptor */
                rx_desc->read.hdr_addr = 0;

                cleaned_count--;
        } while (cleaned_count);

        i += rx_ring->count;

        if (rx_ring->next_to_use != i) {
                /* record the next descriptor to use */
                rx_ring->next_to_use = i;

                /* update next to alloc since we have filled the ring */
                rx_ring->next_to_alloc = i;

                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch. (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                ixgbevf_write_tail(rx_ring, i);
        }
}

/**
 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right. These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
                                    union ixgbe_adv_rx_desc *rx_desc,
                                    struct sk_buff *skb)
{
        /* verify that the packet does not have any known errors */
        if (unlikely(ixgbevf_test_staterr(rx_desc,
                                          IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
                struct net_device *netdev = rx_ring->netdev;

                if (!(netdev->features & NETIF_F_RXALL)) {
                        dev_kfree_skb_any(skb);
                        return true;
                }
        }

        /* if eth_skb_pad returns an error the skb was freed */
        if (eth_skb_pad(skb))
                return true;

        return false;
}

/**
 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
                                  struct ixgbevf_rx_buffer *old_buff)
{
        struct ixgbevf_rx_buffer *new_buff;
        u16 nta = rx_ring->next_to_alloc;

        new_buff = &rx_ring->rx_buffer_info[nta];

        /* update, and store next to alloc */
        nta++;
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

        /* transfer page from old buffer to new buffer */
        new_buff->page = old_buff->page;
        new_buff->dma = old_buff->dma;
        new_buff->page_offset = old_buff->page_offset;

        /* sync the buffer for use by the device */
        dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
                                         new_buff->page_offset,
                                         IXGBEVF_RX_BUFSZ,
                                         DMA_FROM_DEVICE);
}
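
/* Editorial note: recycling keeps the page's DMA mapping alive for the
 * life of the ring; handing a buffer back to hardware only costs a
 * dma_sync_single_range_for_device() rather than a fresh dma_map_page()
 * for every received frame.
 */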

static inline bool ixgbevf_page_is_reserved(struct page *page)
{
        return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

/**
 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
                                struct ixgbevf_rx_buffer *rx_buffer,
                                union ixgbe_adv_rx_desc *rx_desc,
                                struct sk_buff *skb)
{
        struct page *page = rx_buffer->page;
        unsigned char *va = page_address(page) + rx_buffer->page_offset;
        unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
        unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
        unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif
        unsigned int pull_len;

        if (unlikely(skb_is_nonlinear(skb)))
                goto add_tail_frag;

        if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
                memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

                /* page is not reserved, we can reuse buffer as is */
                if (likely(!ixgbevf_page_is_reserved(page)))
                        return true;

                /* this page cannot be reused so discard it */
                put_page(page);
                return false;
        }

        /* we need the header to contain the greater of either ETH_HLEN or
         * 60 bytes if the skb->len is less than 60 for skb_pad.
         */
        pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);

        /* align pull length to size of long to optimize memcpy performance */
        memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

        /* update all of the pointers */
        va += pull_len;
        size -= pull_len;

add_tail_frag:
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                        (unsigned long)va & ~PAGE_MASK, size, truesize);

        /* avoid re-using remote pages */
        if (unlikely(ixgbevf_page_is_reserved(page)))
                return false;

#if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
        if (unlikely(page_count(page) != 1))
                return false;

        /* flip page offset to other buffer */
        rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;

#else
        /* move offset up to the next cache line */
        rx_buffer->page_offset += truesize;

        if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
                return false;

#endif
        /* Even if we own the page, we are not allowed to use atomic_set()
         * This would break get_page_unless_zero() users.
         */
        atomic_inc(&page->_count);

        return true;
}
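
/* Worked example (illustrative, assuming 4K pages and a 2K
 * IXGBEVF_RX_BUFSZ): each page holds two receive buffers, and
 * "page_offset ^= IXGBEVF_RX_BUFSZ" flips between offsets 0 and 2048, so
 * the driver ping-pongs between the two halves of one page for as long as
 * it remains the sole owner (page_count(page) == 1 before the atomic_inc).
 */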

static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
                                               union ixgbe_adv_rx_desc *rx_desc,
                                               struct sk_buff *skb)
{
        struct ixgbevf_rx_buffer *rx_buffer;
        struct page *page;

        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
        page = rx_buffer->page;
        prefetchw(page);

        if (likely(!skb)) {
                void *page_addr = page_address(page) +
                                  rx_buffer->page_offset;

                /* prefetch first cache line of first page */
                prefetch(page_addr);
#if L1_CACHE_BYTES < 128
                prefetch(page_addr + L1_CACHE_BYTES);
#endif

                /* allocate a skb to store the frags */
                skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                                IXGBEVF_RX_HDR_SIZE);
                if (unlikely(!skb)) {
                        rx_ring->rx_stats.alloc_rx_buff_failed++;
                        return NULL;
                }

                /* we will be copying header into skb->data in
                 * pskb_may_pull so it is in our interest to prefetch
                 * it now to avoid a possible cache miss
                 */
                prefetchw(skb->data);
        }

        /* we are reusing so sync this buffer for CPU use */
        dma_sync_single_range_for_cpu(rx_ring->dev,
                                      rx_buffer->dma,
                                      rx_buffer->page_offset,
                                      IXGBEVF_RX_BUFSZ,
                                      DMA_FROM_DEVICE);

        /* pull page into skb */
        if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
                /* hand second half of page back to the ring */
                ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
        } else {
                /* we are not reusing the buffer so unmap it */
                dma_unmap_page(rx_ring->dev, rx_buffer->dma,
                               PAGE_SIZE, DMA_FROM_DEVICE);
        }

        /* clear contents of buffer_info */
        rx_buffer->dma = 0;
        rx_buffer->page = NULL;

        return skb;
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
                                             u32 qmask)
{
        struct ixgbe_hw *hw = &adapter->hw;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                                struct ixgbevf_ring *rx_ring,
                                int budget)
{
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
        struct sk_buff *skb = rx_ring->skb;

        while (likely(total_rx_packets < budget)) {
                union ixgbe_adv_rx_desc *rx_desc;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
                        ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);

                if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
                        break;

                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we know the
                 * RXD_STAT_DD bit is set
                 */
                rmb();

                /* retrieve a buffer from the ring */
                skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);

                /* exit if we failed to retrieve a buffer */
                if (!skb)
                        break;

                cleaned_count++;

                /* fetch next buffer in frame if non-eop */
                if (ixgbevf_is_non_eop(rx_ring, rx_desc))
                        continue;

                /* verify the packet layout is correct */
                if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
                        skb = NULL;
                        continue;
                }

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;

                /* Workaround hardware that can't do proper VEPA multicast
                 * source pruning.
                 */
                if ((skb->pkt_type == PACKET_BROADCAST ||
                     skb->pkt_type == PACKET_MULTICAST) &&
                    ether_addr_equal(rx_ring->netdev->dev_addr,
                                     eth_hdr(skb)->h_source)) {
                        dev_kfree_skb_irq(skb);
                        continue;
                }

                /* populate checksum, VLAN, and protocol */
                ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);

                ixgbevf_rx_skb(q_vector, skb);

                /* reset skb pointer */
                skb = NULL;

                /* update budget accounting */
                total_rx_packets++;
        }

        /* place incomplete frames back on ring for completion */
        rx_ring->skb = skb;

        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
        rx_ring->stats.bytes += total_rx_bytes;
        u64_stats_update_end(&rx_ring->syncp);
        q_vector->rx.total_packets += total_rx_packets;
        q_vector->rx.total_bytes += total_rx_bytes;

        return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
        struct ixgbevf_q_vector *q_vector =
                container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring *ring;
        int per_ring_budget, work_done = 0;
        bool clean_complete = true;

        ixgbevf_for_each_ring(ring, q_vector->tx)
                clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
        if (!ixgbevf_qv_lock_napi(q_vector))
                return budget;
#endif

        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling
         */
        if (q_vector->rx.count > 1)
                per_ring_budget = max(budget/q_vector->rx.count, 1);
        else
                per_ring_budget = budget;

        ixgbevf_for_each_ring(ring, q_vector->rx) {
                int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
                                                   per_ring_budget);
                work_done += cleaned;
                clean_complete &= (cleaned < per_ring_budget);
        }

#ifdef CONFIG_NET_RX_BUSY_POLL
        ixgbevf_qv_unlock_napi(q_vector);
#endif

        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
                return budget;
        /* all work done, exit the polling mode */
        napi_complete_done(napi, work_done);
        if (adapter->rx_itr_setting & 1)
                ixgbevf_set_itr(q_vector);
        if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
            !test_bit(__IXGBEVF_REMOVING, &adapter->state))
                ixgbevf_irq_enable_queues(adapter,
                                          1 << q_vector->v_idx);

        return 0;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
        int v_idx = q_vector->v_idx;
        u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

        /* set the WDIS bit to not clear the timer bits and cause an
         * immediate assertion of the interrupt
         */
        itr_reg |= IXGBE_EITR_CNT_WDIS;

        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
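
/* Editorial note (assumes the 82599/X540 EITR layout): q_vector->itr is
 * kept in register units throughout this file, where the interval in
 * microseconds is itr >> 2; ixgbevf_update_itr() below relies on this
 * when computing timepassed_us.
 */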

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
        struct ixgbevf_q_vector *q_vector =
                container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring *ring;
        int found = 0;

        if (test_bit(__IXGBEVF_DOWN, &adapter->state))
                return LL_FLUSH_FAILED;

        if (!ixgbevf_qv_lock_poll(q_vector))
                return LL_FLUSH_BUSY;

        ixgbevf_for_each_ring(ring, q_vector->rx) {
                found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
                if (found)
                        ring->stats.cleaned += found;
                else
                        ring->stats.misses++;
#endif
                if (found)
                        break;
        }

        ixgbevf_qv_unlock_poll(q_vector);

        return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
        struct ixgbevf_q_vector *q_vector;
        int q_vectors, v_idx;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        adapter->eims_enable_mask = 0;

        /* Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                struct ixgbevf_ring *ring;

                q_vector = adapter->q_vector[v_idx];

                ixgbevf_for_each_ring(ring, q_vector->rx)
                        ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

                ixgbevf_for_each_ring(ring, q_vector->tx)
                        ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

                if (q_vector->tx.ring && !q_vector->rx.ring) {
                        /* Tx only vector */
                        if (adapter->tx_itr_setting == 1)
                                q_vector->itr = IXGBE_12K_ITR;
                        else
                                q_vector->itr = adapter->tx_itr_setting;
                } else {
                        /* Rx or Rx/Tx vector */
                        if (adapter->rx_itr_setting == 1)
                                q_vector->itr = IXGBE_20K_ITR;
                        else
                                q_vector->itr = adapter->rx_itr_setting;
                }

                /* add q_vector eims value to global eims_enable_mask */
                adapter->eims_enable_mask |= 1 << v_idx;

                ixgbevf_write_eitr(q_vector);
        }

        ixgbevf_set_ivar(adapter, -1, 1, v_idx);
        /* setup eims_other and add value to global eims_enable_mask */
        adapter->eims_other = 1 << v_idx;
        adapter->eims_enable_mask |= adapter->eims_other;
}
1163
1164enum latency_range {
1165 lowest_latency = 0,
1166 low_latency = 1,
1167 bulk_latency = 2,
1168 latency_invalid = 255
1169};
1170
1171/**
1172 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001173 * @q_vector: structure containing interrupt and ring information
1174 * @ring_container: structure containing ring performance data
Greg Rose92915f72010-01-09 02:24:10 +00001175 *
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001176 * Stores a new ITR value based on packets and byte
1177 * counts during the last interrupt. The advantage of per interrupt
1178 * computation is faster updates and more accurate ITR for the current
1179 * traffic pattern. Constants in this function were computed
1180 * based on theoretical maximum wire speed and thresholds were set based
1181 * on testing data as well as attempting to minimize response time
1182 * while increasing bulk throughput.
Greg Rose92915f72010-01-09 02:24:10 +00001183 **/
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001184static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
1185 struct ixgbevf_ring_container *ring_container)
Greg Rose92915f72010-01-09 02:24:10 +00001186{
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001187 int bytes = ring_container->total_bytes;
1188 int packets = ring_container->total_packets;
Greg Rose92915f72010-01-09 02:24:10 +00001189 u32 timepassed_us;
1190 u64 bytes_perint;
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001191 u8 itr_setting = ring_container->itr;
Greg Rose92915f72010-01-09 02:24:10 +00001192
1193 if (packets == 0)
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001194 return;
Greg Rose92915f72010-01-09 02:24:10 +00001195
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001196 /* simple throttle rate management
Greg Rose92915f72010-01-09 02:24:10 +00001197 * 0-20MB/s lowest (100000 ints/s)
1198 * 20-100MB/s low (20000 ints/s)
Alexander Duyck8a9ca112015-09-29 13:11:15 -07001199 * 100-1249MB/s bulk (12000 ints/s)
Greg Rose92915f72010-01-09 02:24:10 +00001200 */
1201 /* what was last interrupt timeslice? */
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001202 timepassed_us = q_vector->itr >> 2;
Greg Rose92915f72010-01-09 02:24:10 +00001203 bytes_perint = bytes / timepassed_us; /* bytes/usec */
1204
1205 switch (itr_setting) {
1206 case lowest_latency:
Alexander Duycke2c28ce2012-05-11 08:32:34 +00001207 if (bytes_perint > 10)
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001208 itr_setting = low_latency;
Greg Rose92915f72010-01-09 02:24:10 +00001209 break;
1210 case low_latency:
Alexander Duycke2c28ce2012-05-11 08:32:34 +00001211 if (bytes_perint > 20)
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001212 itr_setting = bulk_latency;
Alexander Duycke2c28ce2012-05-11 08:32:34 +00001213 else if (bytes_perint <= 10)
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001214 itr_setting = lowest_latency;
Greg Rose92915f72010-01-09 02:24:10 +00001215 break;
1216 case bulk_latency:
Alexander Duycke2c28ce2012-05-11 08:32:34 +00001217 if (bytes_perint <= 20)
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001218 itr_setting = low_latency;
Greg Rose92915f72010-01-09 02:24:10 +00001219 break;
1220 }
1221
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001222 /* clear work counters since we have the values we need */
1223 ring_container->total_bytes = 0;
1224 ring_container->total_packets = 0;
1225
1226 /* write updated itr to ring container */
1227 ring_container->itr = itr_setting;
Greg Rose92915f72010-01-09 02:24:10 +00001228}

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_12K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}
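
/* Editorial note, not from the original source: the smoothing above is a
 * weighted blend that moves the programmed interval only part of the way
 * toward the new target, damping oscillation between ITR levels.  For
 * example, stepping from q_vector->itr == 200 toward a target of 40:
 * new_itr = (10 * 40 * 200) / (9 * 40 + 200) = 80000 / 560 = 142, so the
 * interval tightens gradually instead of jumping straight to 40.
 */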

static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	ixgbevf_service_event_schedule(adapter);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx]->next = q_vector->rx.ring;
	q_vector->rx.ring = a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx]->next = q_vector->tx.ring;
	q_vector->tx.ring = a->tx_ring[t_idx];
	q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible. You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/* If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
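
/* Editorial note, not from the original source: the DIV_ROUND_UP pass
 * spreads any remainder across the earlier vectors.  Example: 5 Rx rings
 * on 2 q_vectors gives rqpv = DIV_ROUND_UP(5, 2) = 3 rings on vector 0,
 * then rqpv = DIV_ROUND_UP(2, 1) = 2 rings on vector 1, so no ring is
 * left unmapped.
 */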

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt Error: %d\n",
			       err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
		       err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them. Set the number of msix vectors to zero
	 * indicating that not enough can be allocated. The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail. The only way to recover is to unload the driver and
	 * reload it again. If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];

		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}
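
/* Editorial note, not from the original source: assuming the usual 82599
 * semantics, VTEIAM enables auto-mask and VTEIAC auto-clear for the queue
 * vectors, which is why ixgbevf_msix_clean_rings() above can schedule
 * NAPI without masking anything itself - the hardware masks the vector
 * when its MSI-X message fires, and it is re-armed through VTEIMS once
 * polling completes.
 */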

/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));

	/* disable head writeback */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on-chip descriptors, which is
	 * currently 40.
	 */
	txdctl |= (8 << 16);	/* WTHRESH = 8 */

	/* Setting PTHRESH to 32 improves performance */
	txdctl |= (1 << 8) |	/* HTHRESH = 1 */
		   32;		/* PTHRESH = 32 */

	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);

	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		pr_err("Could not enable Tx Queue %d\n", reg_idx);
}
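
/* Editorial note, not from the original source: assuming the 82599 TXDCTL
 * layout (PTHRESH in bits 6:0, HTHRESH in bits 14:8, WTHRESH in bits
 * 22:16), the ORs above program prefetch/host/write-back thresholds of
 * 32/1/8 alongside the ENABLE bit before the register is written back.
 */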

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u32 i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
	srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* PSRTYPE must be initialized in 82599 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}
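
/* Editorial note, not from the original source: bit 29 appears to be the
 * low bit of the PSRTYPE RQPL (RSS queues-per-pool) field, advertising
 * 2^1 = 2 Rx queues; that would explain why it is only set when more
 * than one Rx queue is in use.
 */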

#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	/* the hardware may take up to 100us to really disable the Rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
}

static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
		       reg_idx);
}

static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vfmrqc = 0, vfreta = 0;
	u16 rss_i = adapter->num_rx_queues;
	u8 i, j;

	/* Fill out hash function seeds */
	netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
	for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);

	for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
		if (j == rss_i)
			j = 0;

		adapter->rss_indir_tbl[i] = j;

		vfreta |= j << (i & 0x3) * 8;
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
			vfreta = 0;
		}
	}

	/* Perform hash on these packet types */
	vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
		  IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
		  IXGBE_VFMRQC_RSS_FIELD_IPV6 |
		  IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;

	vfmrqc |= IXGBE_VFMRQC_RSSEN;

	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
}
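
/* Editorial note, not from the original source: each 32-bit VFRETA
 * register packs four 8-bit redirection entries; "j << (i & 0x3) * 8"
 * places entry i in its byte lane and the accumulated word is flushed on
 * every fourth entry (i >> 2 selects the register).  With rss_i == 2 the
 * table simply alternates 0, 1, 0, 1, ... across all entries.
 */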

static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	ixgbevf_disable_rx_queue(adapter, ring);

	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
			IXGBE_DCA_RXCTRL_DESC_RRO_EN);

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	ixgbevf_configure_srrctl(adapter, reg_idx);

	/* allow any size packet since we can handle overflow */
	rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;

	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	ixgbevf_rx_desc_queue_enable(adapter, ring);
	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	int i;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	ixgbevf_setup_psrtype(adapter);
	if (hw->mac.type >= ixgbe_mac_X550_vf)
		ixgbevf_setup_vfmrqc(adapter);

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev,
					htons(ETH_P_8021Q), vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/* If the list is empty then send message to PF driver to
		 * clear all MAC VLANs on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int flags = netdev->flags;
	int xcast_mode;

	xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
		     (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
		     IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}
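
/* Editorial note, not from the original source: the conditional above maps
 * netdev flags to a mailbox xcast mode - IFF_ALLMULTI requests ALLMULTI,
 * otherwise IFF_BROADCAST or IFF_MULTICAST requests MULTI, and anything
 * else falls back to NONE.  The PF is free to reject the requested mode.
 */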

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
#endif
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
#ifdef CONFIG_NET_RX_BUSY_POLL
		while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
			pr_info("QV %d locked\n", q_idx);
			usleep_range(1000, 20000);
		}
#endif /* CONFIG_NET_RX_BUSY_POLL */
	}
}

static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = adapter->num_rx_queues;
	unsigned int num_tx_queues = adapter->num_tx_queues;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* we need only one Tx queue */
		num_tx_queues = 1;

		/* update default Tx ring register index */
		adapter->tx_ring[0]->reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* if we have a bad config abort request queue reset */
	if ((adapter->num_rx_queues != num_rx_queues) ||
	    (adapter->num_tx_queues != num_tx_queues)) {
		/* force mailbox timeout to prevent further messages */
		hw->mbx.timeout = 0;

		/* wait for watchdog to come around and bail us out */
		adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
	}

	return 0;
}
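
/* Editorial note, not from the original source: zeroing hw->mbx.timeout
 * deliberately makes further mailbox requests fail fast, so no more PF
 * messages are attempted with a stale queue layout; the service task then
 * sees IXGBEVF_FLAG_QUEUE_RESET_REQUESTED and rebuilds the queues to
 * match what the PF reported.
 */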

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure_dcb(adapter);

	ixgbevf_set_rx_mode(adapter->netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
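
/* Editorial note, not from the original source: the good-octet counters
 * are wider than 32 bits and split across _LSB and _MSB registers, hence
 * the shift-by-32 OR when reading them.  The "base" snapshots taken here
 * let the stats code report monotonic totals even though a PF-initiated
 * reset clears the hardware counters (see ixgbevf_save_reset_stats()).
 */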

static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_12,
		      ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}
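
/* Editorial note, not from the original source: the api[] list is ordered
 * newest first, so the VF walks down from mailbox API 1.2 until the PF
 * acknowledges one; if none is accepted the driver is left on the legacy
 * 1.0-style behavior it assumed at init time.
 */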

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	ixgbevf_irq_enable(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->service_timer, jiffies);
}

void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	unsigned long size;
	unsigned int i;

	/* Free Rx ring sk_buff */
	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring pages */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer;

		rx_buffer = &rx_ring->rx_buffer_info[i];
		if (rx_buffer->dma)
			dma_unmap_page(dev, rx_buffer->dma,
				       PAGE_SIZE, DMA_FROM_DEVICE);
		rx_buffer->dma = 0;
		if (rx_buffer->page)
			__free_page(rx_buffer->page);
		rx_buffer->page = NULL;
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);
}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);
}

/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
}

void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* signal that we are down to the interrupt handler */
	if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
		return; /* do nothing if already down */

	/* disable all enabled Rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);

	usleep_range(10000, 20000);

	netif_tx_stop_all_queues(netdev);

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->service_timer);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;

		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
				IXGBE_TXDCTL_SWFLSH);
	}

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}

void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw)) {
		hw_dbg(hw, "PF still resetting\n");
	} else {
		hw->mac.ops.init_hw(hw);
		ixgbevf_negotiate_api(adapter);
	}

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}

	adapter->last_reset = jiffies;
}

static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					int vectors)
{
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return vectors;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = vectors;

	return 0;
}
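
/* Editorial note, not from the original source: pci_enable_msix_range()
 * grants anywhere between vector_threshold and the requested count and
 * returns the number actually allocated (or a negative errno), so a
 * partial grant is accepted here and simply leaves fewer vectors for the
 * queue pairs.
 */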

/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine. The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features. This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	int err;

	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return;

	/* we need as many queues as traffic classes */
	if (num_tcs > 1) {
		adapter->num_rx_queues = num_tcs;
	} else {
		u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);

		switch (hw->api_version) {
		case ixgbe_mbox_api_11:
		case ixgbe_mbox_api_12:
			adapter->num_rx_queues = rss;
			adapter->num_tx_queues = rss;
		default:
			break;
		}
	}
}

/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ring *ring;
	int rx = 0, tx = 0;

	for (; tx < adapter->num_tx_queues; tx++) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;

		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = tx;
		ring->reg_idx = tx;

		adapter->tx_ring[tx] = ring;
	}

	for (; rx < adapter->num_rx_queues; rx++) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;

		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		ring->count = adapter->rx_ring_count;
		ring->queue_index = rx;
		ring->reg_idx = rx;

		adapter->rx_ring[rx] = ring;
	}

	return 0;

err_allocation:
	while (tx) {
		kfree(adapter->tx_ring[--tx]);
		adapter->tx_ring[tx] = NULL;
	}

	while (rx) {
		kfree(adapter->rx_ring[--rx]);
		adapter->rx_ring[rx] = NULL;
	}
	return -ENOMEM;
}

/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	int vector, v_budget;

	/* It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPUs. So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPUs.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter.
	 */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
	if (err)
		goto out;

	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		goto out;

	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);

out:
	return err;
}

/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       ixgbevf_poll, 64);
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}

/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}

/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		pr_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" :
	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbevf_free_q_vectors(adapter);
	ixgbevf_reset_interrupt_capability(adapter);
}

/**
 * ixgbevf_sw_init - Initialize general software structures
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int err;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);

	/* assume legacy case in which PF would only give VF 2 queues */
	hw->mac.max_tx_queues = 2;
	hw->mac.max_rx_queues = 2;

	/* lock to protect mailbox accesses */
	spin_lock_init(&adapter->mbx_lock);

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state. Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_hw failed: %d\n", err);
			goto out;
		}
		ixgbevf_negotiate_api(adapter);
		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		ether_addr_copy(netdev->dev_addr, hw->mac.addr);
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		ether_addr_copy(hw->mac.addr, netdev->dev_addr);
		ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr);
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;

out:
	return err;
}

#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
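
/* Illustrative example (not part of the driver): the VF statistics
 * registers are only 32 or 36 bits wide, so the macros above extend
 * them into 64-bit software counters by watching for wraparound. For
 * the 32-bit case, if last_counter = 0xFFFFFFF0 and the register now
 * reads 0x00000010, current < last signals a wrap, so:
 *
 *	counter += 0x100000000LL;		// credit one 2^32 wrap
 *	counter &= 0xFFFFFFFF00000000LL;	// keep the extended bits
 *	counter |= 0x00000010;			// splice in the new value
 *
 * The 36-bit variant applies the same idea with a 2^36 increment for
 * the byte counters split across LSB/MSB registers.
 */
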
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->hw_csum_rx_error +=
			adapter->rx_ring[i]->hw_csum_rx_error;
		adapter->rx_ring[i]->hw_csum_rx_error = 0;
	}
}

/**
 * ixgbevf_service_timer - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_service_timer(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;

	/* Reset the timer */
	mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);

	ixgbevf_service_event_schedule(adapter);
}

static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
{
	if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
		return;

	adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}

/**
 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes. First it strobes the interrupt lines
 * in order to make certain interrupts are occurring. Secondly it sets the
 * bits needed to check for TX hangs. As a result we should immediately
 * determine if a hang has occurred.
 **/
static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Force detection of hung controller */
	if (netif_carrier_ok(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_check_for_tx_hang(adapter->tx_ring[i]);
	}

	/* get one bit for every active Tx/Rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];

		if (qv->rx.ring || qv->tx.ring)
			eics |= 1 << i;
	}

	/* Cause software interrupt to ensure rings are cleaned */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
}
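
/* Illustrative example (not part of the driver): with two queue
 * vectors that each own at least one ring, the loop above produces
 * eics = 0x3. Writing that mask to VTEICS makes the hardware fire the
 * corresponding MSI-X vectors, so a ring whose interrupt was lost is
 * still cleaned on the next pass of the service task.
 */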

/**
 * ixgbevf_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	s32 err;

	spin_lock_bh(&adapter->mbx_lock);

	err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	/* if check for link returns error we will need to reset */
	if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
		adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
		link_up = false;
	}

	adapter->link_up = link_up;
	adapter->link_speed = link_speed;
}

/**
 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
 *				 print link up message
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* only continue if link was previously down */
	if (netif_carrier_ok(netdev))
		return;

	dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
		 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
		 "10 Gbps" :
		 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
		 "1 Gbps" :
		 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
		 "100 Mbps" :
		 "unknown speed");

	netif_carrier_on(netdev);
}

/**
 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
 *				   print link down message
 * @adapter: pointer to the adapter structure
 **/
static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->link_speed = 0;

	/* only continue if link was up previously */
	if (!netif_carrier_ok(netdev))
		return;

	dev_info(&adapter->pdev->dev, "NIC Link is Down\n");

	netif_carrier_off(netdev);
}

/**
 * ixgbevf_watchdog_subtask - update the link state and statistics
 * @adapter: board private structure
 **/
static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
{
	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	ixgbevf_watchdog_update_link(adapter);

	if (adapter->link_up)
		ixgbevf_watchdog_link_is_up(adapter);
	else
		ixgbevf_watchdog_link_is_down(adapter);

	ixgbevf_update_stats(adapter);
}

/**
 * ixgbevf_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_service_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       service_task);
	struct ixgbe_hw *hw = &adapter->hw;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			rtnl_lock();
			ixgbevf_down(adapter);
			rtnl_unlock();
		}
		return;
	}

	ixgbevf_queue_reset_subtask(adapter);
	ixgbevf_reset_subtask(adapter);
	ixgbevf_watchdog_subtask(adapter);
	ixgbevf_check_hang_subtask(adapter);

	ixgbevf_service_event_complete(adapter);
}

/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
	ixgbevf_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(tx_ring->dev, "Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}
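
/* Illustrative sizing example (not part of the driver): each
 * union ixgbe_adv_tx_desc is 16 bytes, so a ring of 1024 entries
 * (the usual IXGBEVF_DEFAULT_TXD) needs 1024 * 16 = 16384 bytes;
 * ALIGN(size, 4096) leaves that unchanged since it is already a
 * multiple of 4 KiB, and rounds smaller rings up to whole pages.
 */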

/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
		break;
	}
	return err;
}

/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
	ixgbevf_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* A previous failure to open the device because of a lack of
	 * available MSIX vector resources may have reset the number
	 * of msix vectors variable to zero. The only way to recover
	 * is to unload/reload the driver and hope that the system has
	 * been able to recover some MSIX vector resources.
	 */
	if (!adapter->num_msix_vectors)
		return -ENOMEM;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the VF can't start.
		 */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
			goto err_setup_reset;
		}
	}

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/* Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_up_complete(adapter);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_reset(adapter);

err_setup_reset:

	return err;
}

/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}

static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;

	if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
		return;

	adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ixgbevf_close(dev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_init_interrupt_scheme(adapter);

	if (netif_running(dev))
		ixgbevf_open(dev);
}

static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,
				u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
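
/* Note (illustrative): a context descriptor occupies a ring slot of
 * its own and carries only offload metadata (header lengths, MSS,
 * checksum/TSO type); the data descriptors queued after it reference
 * that metadata through the context index, so an offloaded packet
 * costs one context slot plus its data slots.
 */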

static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct ixgbevf_tx_buffer *first,
		       u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (first->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM |
				   IXGBE_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM;
	}

	/* compute header length */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update GSO size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}
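
/* Illustrative packing example (not part of the driver), assuming the
 * usual shift values L4LEN_SHIFT = 8, MSS_SHIFT = 16 and IDX_SHIFT = 4:
 * for a 20-byte TCP header and an MSS of 1448,
 *
 *	mss_l4len_idx = (20 << 8) | (1448 << 16) | (1 << 4);
 *
 * placing the L4 header length, the segment size and context index 1
 * in a single 32-bit field of the context descriptor.
 */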

static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct ixgbevf_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_hdr = 0;

		switch (first->protocol) {
		case htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);
}

static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
				      IXGBE_ADVTXD_DCMD_IFCS |
				      IXGBE_ADVTXD_DCMD_DEXT);

	/* set HW VLAN bit if VLAN is present */
	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);

	return cmd_type;
}

static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
				     u32 tx_flags, unsigned int paylen)
{
	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);

	/* enable L4 checksum for TSO and TX checksum offload */
	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);

	/* use index 1 context for TSO/FSO/FCOE */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);

	/* Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);

	tx_desc->read.olinfo_status = olinfo_status;
}
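
/* Illustrative example (not part of the driver), assuming the usual
 * IXGBE_ADVTXD_PAYLEN_SHIFT of 14: a TSO packet with skb->len = 66538
 * and hdr_len = 66 reports paylen = 66472 in the upper bits, alongside
 * the TXSM/IXSM checksum-insertion bits, the context index, and the CC
 * bit that must be set while the VF's Tx switch is active.
 */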

static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			   struct ixgbevf_tx_buffer *first,
			   const u8 hdr_len)
{
	dma_addr_t dma;
	struct sk_buff *skb = first->skb;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	u32 tx_flags = first->tx_flags;
	__le32 cmd_type;
	u16 i = tx_ring->next_to_use;

	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);

	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
	cmd_type = ixgbevf_tx_cmd_type(tx_flags);

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	dma_unmap_len_set(first, len, size);
	dma_unmap_addr_set(first, dma, dma);

	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
			tx_desc->read.olinfo_status = 0;
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer = &tx_ring->tx_buffer_info[i];
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);
		tx_desc->read.olinfo_status = 0;

		frag++;
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch. (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier (wmb) to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* notify HW of packet */
	ixgbevf_write_tail(tx_ring, i);

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
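
/* Illustrative example (not part of the driver): assuming the usual
 * IXGBE_MAX_DATA_PER_TXD of 16 KiB, an skb with a 14 KiB linear area
 * and one 50 KiB fragment is split by the loop above into one
 * descriptor for the head plus four for the fragment (50 KiB in 16 KiB
 * chunks), and only the final descriptor carries the RS/EOP bits.
 */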

static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 * smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available.
	 */
	if (likely(ixgbevf_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}
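
/* Note (illustrative): the stop-then-recheck above closes a race with
 * the Tx cleanup path. Without the smp_mb() between stopping the
 * subqueue and re-reading ixgbevf_desc_unused(), cleanup could free
 * descriptors and observe a not-yet-visible stopped bit, leaving the
 * queue stopped forever even though room is available.
 */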

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}

static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_tx_buffer *first;
	struct ixgbevf_ring *tx_ring;
	int tso;
	u32 tx_flags = 0;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 hdr_len = 0;
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);

	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = adapter->tx_ring[skb->queue_mapping];

	/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 * + 2 desc gap to keep tail from touching head,
	 * + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = vlan_get_protocol(skb);

	tso = ixgbevf_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbevf_tx_csum(tx_ring, first);

	ixgbevf_tx_map(tx_ring, first, hdr_len);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	ether_addr_copy(hw->mac.addr, addr->sa_data);

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	return 0;
}

/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	default:
		if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	return 0;
}
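
/* Illustrative arithmetic (not part of the driver): for the standard
 * 1500-byte MTU, max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) =
 * 1518, which fits within MAXIMUM_ETHERNET_VLAN_SIZE, so no jumbo
 * support is needed; a 9000-byte MTU only passes the check when the
 * negotiated mailbox API (or a newer VF mac type) permits jumbo frames.
 */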

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbevf_netpoll(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return;
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
		rtnl_unlock();
	}

	ixgbevf_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_restore_state(pdev);
	/* pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = ixgbevf_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(pdev, PMSG_SUSPEND);
}

static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}
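
/* Note (illustrative): the u64_stats_fetch_begin_irq()/retry_irq()
 * pair is a seqcount read loop. On 32-bit hosts a 64-bit counter
 * update is not atomic, so if the writer bumps the sequence while
 * bytes/packets are being copied the loop rereads; on 64-bit hosts it
 * compiles down to plain loads.
 */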

static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= ixgbevf_busy_poll_recv,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbevf_netpoll,
#endif
	.ndo_features_check	= passthru_features_check,
};
Greg Rose92915f72010-01-09 02:24:10 +00003907
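/* Wire up the netdev callbacks and ethtool ops; watchdog_timeo is the
 * Tx stall time after which the stack invokes .ndo_tx_timeout.
 */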
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
        dev->netdev_ops = &ixgbevf_netdev_ops;
        ixgbevf_set_ethtool_ops(dev);
        dev->watchdog_timeo = 5 * HZ;
}

/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct ixgbevf_adapter *adapter = NULL;
        struct ixgbe_hw *hw = NULL;
        const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
        int err, pci_using_dac;
        bool disable_dev = false;

        err = pci_enable_device(pdev);
        if (err)
                return err;

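        /* Prefer 64-bit DMA and fall back to a 32-bit mask; if neither can
         * be set there is no usable DMA configuration and probe fails.
         */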
        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
                        goto err_dma;
                }
                pci_using_dac = 0;
        }

        err = pci_request_regions(pdev, ixgbevf_driver_name);
        if (err) {
                dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
                goto err_pci_reg;
        }

        pci_set_master(pdev);

        netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
                                   MAX_TX_QUEUES);
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }

        SET_NETDEV_DEV(netdev, &pdev->dev);

        adapter = netdev_priv(netdev);

        adapter->netdev = netdev;
        adapter->pdev = pdev;
        hw = &adapter->hw;
        hw->back = adapter;
        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

        /* call save state here in standalone driver because it relies on
         * adapter struct to exist, and needs to call netdev_priv
         */
        pci_save_state(pdev);

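        /* adapter->io_addr keeps its own copy of the BAR 0 mapping so the
         * unmap paths do not depend on hw->hw_addr, which the driver clears
         * when it detects a surprise removal of the adapter.
         */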
        hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
                              pci_resource_len(pdev, 0));
        adapter->io_addr = hw->hw_addr;
        if (!hw->hw_addr) {
                err = -EIO;
                goto err_ioremap;
        }

        ixgbevf_assign_netdev_ops(netdev);

        /* Setup HW API */
        memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
        hw->mac.type = ii->mac;

        memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
               sizeof(struct ixgbe_mbx_operations));

        /* setup the private structure */
        err = ixgbevf_sw_init(adapter);
        if (err)
                goto err_sw_init;

        /* The HW MAC address was set and/or determined in sw_init */
        if (!is_valid_ether_addr(netdev->dev_addr)) {
                pr_err("invalid MAC address\n");
                err = -EIO;
                goto err_sw_init;
        }

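        /* Features in hw_features stay user-toggleable through ethtool;
         * the VLAN offloads below are added to the active set only.
         */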
        netdev->hw_features = NETIF_F_SG |
                              NETIF_F_IP_CSUM |
                              NETIF_F_IPV6_CSUM |
                              NETIF_F_TSO |
                              NETIF_F_TSO6 |
                              NETIF_F_RXCSUM;

        netdev->features = netdev->hw_features |
                           NETIF_F_HW_VLAN_CTAG_TX |
                           NETIF_F_HW_VLAN_CTAG_RX |
                           NETIF_F_HW_VLAN_CTAG_FILTER;

        netdev->vlan_features |= NETIF_F_TSO |
                                 NETIF_F_TSO6 |
                                 NETIF_F_IP_CSUM |
                                 NETIF_F_IPV6_CSUM |
                                 NETIF_F_SG;

        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;

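        /* The VF can filter extra unicast addresses itself, so the stack
         * need not fall back to promiscuous mode for them.
         */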
        netdev->priv_flags |= IFF_UNICAST_FLT;

        if (IXGBE_REMOVED(hw->hw_addr)) {
                err = -EIO;
                goto err_sw_init;
        }

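        /* The service timer periodically reschedules service_task, which
         * performs watchdog, reset, and statistics housekeeping.
         */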
        setup_timer(&adapter->service_timer, &ixgbevf_service_timer,
                    (unsigned long)adapter);

        INIT_WORK(&adapter->service_task, ixgbevf_service_task);
        set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
        clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);

        err = ixgbevf_init_interrupt_scheme(adapter);
        if (err)
                goto err_sw_init;

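        /* "%d" is expanded to the next free ethN index by register_netdev() */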
        strcpy(netdev->name, "eth%d");

        err = register_netdev(netdev);
        if (err)
                goto err_register;

        pci_set_drvdata(pdev, netdev);
        netif_carrier_off(netdev);

        ixgbevf_init_last_counter_stats(adapter);

        /* print the VF info */
        dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
        dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);

        switch (hw->mac.type) {
        case ixgbe_mac_X550_vf:
                dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
                break;
        case ixgbe_mac_X540_vf:
                dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
                break;
        case ixgbe_mac_82599_vf:
        default:
                dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
                break;
        }

        return 0;

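/* error paths unwind the successful setup steps above in reverse order */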
err_register:
        ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
        ixgbevf_reset_interrupt_capability(adapter);
        iounmap(adapter->io_addr);
err_ioremap:
        disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_pci_reg:
err_dma:
        if (!adapter || disable_dev)
                pci_disable_device(pdev);
        return err;
}

/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter;
        bool disable_dev;

        if (!netdev)
                return;

        adapter = netdev_priv(netdev);

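        /* Flag the adapter as being removed before flushing the service
         * task so that an already-running task cannot rearm itself.
         */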
        set_bit(__IXGBEVF_REMOVING, &adapter->state);
        cancel_work_sync(&adapter->service_task);

        if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);

        ixgbevf_clear_interrupt_scheme(adapter);
        ixgbevf_reset_interrupt_capability(adapter);

        iounmap(adapter->io_addr);
        pci_release_regions(pdev);

        hw_dbg(&adapter->hw, "Remove complete\n");

        disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
        free_netdev(netdev);

        if (disable_dev)
                pci_disable_device(pdev);
}

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
                                                  pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
                return PCI_ERS_RESULT_DISCONNECT;

        rtnl_lock();
        netif_device_detach(netdev);

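        /* A permanent failure means the device will not come back; tell
         * the recovery core to disconnect instead of requesting a reset.
         */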
        if (state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        if (netif_running(netdev))
                ixgbevf_down(adapter);

        if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
                pci_disable_device(pdev);
        rtnl_unlock();

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        if (pci_enable_device_mem(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }

        smp_mb__before_atomic();
        clear_bit(__IXGBEVF_DISABLED, &adapter->state);
        pci_set_master(pdev);

        ixgbevf_reset(adapter);

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        if (netif_running(netdev))
                ixgbevf_up(adapter);

        netif_device_attach(netdev);
}

/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
        .error_detected = ixgbevf_io_error_detected,
        .slot_reset = ixgbevf_io_slot_reset,
        .resume = ixgbevf_io_resume,
};

static struct pci_driver ixgbevf_driver = {
        .name           = ixgbevf_driver_name,
        .id_table       = ixgbevf_pci_tbl,
        .probe          = ixgbevf_probe,
        .remove         = ixgbevf_remove,
#ifdef CONFIG_PM
        /* Power Management Hooks */
        .suspend        = ixgbevf_suspend,
        .resume         = ixgbevf_resume,
#endif
        .shutdown       = ixgbevf_shutdown,
        .err_handler    = &ixgbevf_err_handler
};

/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
        int ret;

        pr_info("%s - version %s\n", ixgbevf_driver_string,
                ixgbevf_driver_version);

        pr_info("%s\n", ixgbevf_copyright);

        ret = pci_register_driver(&ixgbevf_driver);
        return ret;
}

module_init(ixgbevf_init_module);

/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
        pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
        struct ixgbevf_adapter *adapter = hw->back;

        return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */