/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "3.2.2-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2015 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf]	= &ixgbevf_82599_vf_info,
	[board_82599_vf_hv]	= &ixgbevf_82599_vf_hv_info,
	[board_X540_vf]		= &ixgbevf_X540_vf_info,
	[board_X540_vf_hv]	= &ixgbevf_X540_vf_hv_info,
	[board_X550_vf]		= &ixgbevf_X550_vf_info,
	[board_X550_vf_hv]	= &ixgbevf_X550_vf_hv_info,
	[board_X550EM_x_vf]	= &ixgbevf_X550EM_x_vf_info,
	[board_X550EM_x_vf_hv]	= &ixgbevf_X550EM_x_vf_hv_info,
	[board_x550em_a_vf]	= &ixgbevf_x550em_a_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

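/* Illustrative note (not in the original source): debug here is a
 * message-level count, not a raw bitmask, assuming it is handed to the
 * standard netif_msg_init() helper at probe time.  Under those semantics
 * "modprobe ixgbevf debug=3" enables the first three message classes,
 * (1 << 3) - 1 = DRV | PROBE | LINK, while the default of -1 falls back
 * to DEFAULT_MSG_ENABLE above.
 */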
static struct workqueue_struct *ixgbevf_wq;

static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
{
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
		queue_work(ixgbevf_wq, &adapter->service_task);
}

static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
}

/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	dev_err(&adapter->pdev->dev, "Adapter removed\n");
	if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		ixgbevf_service_event_schedule(adapter);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}

u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value;

	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}

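/* Illustrative note (not in the original source): on a PCIe surprise
 * removal, MMIO reads to the vanished function typically complete with
 * all ones, which is why IXGBE_FAILED_READ_REG (0xFFFFFFFF) doubles as
 * the "device gone" sentinel checked above.  Since a register could
 * legitimately read as all ones, ixgbevf_check_remove() confirms the
 * removal with a second read of IXGBE_VFSTATUS.
 */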
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* Tx or Rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

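/* Illustrative worked example (not in the original source): each 32-bit
 * VTIVAR register packs four 8-bit cause entries, two queues per register
 * with one Rx byte and one Tx byte each.  For queue 5, Rx (direction 0):
 * register VTIVAR(5 >> 1) = VTIVAR(2), bit offset 16 * (5 & 1) + 8 * 0 = 16,
 * so the vector lands in bits 23:16.  Queue 5, Tx (direction 1) would use
 * offset 24, i.e. bits 31:24 of the same register.
 */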
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(tx_ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
	return ring->stats.packets;
}

static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

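/* Illustrative worked example (not in the original source): the pending
 * count has to handle the hardware head pointer wrapping past the tail.
 * With ring->count = 512, head = 500 and tail = 20, the descriptors still
 * owned by hardware number 20 + 512 - 500 = 32; with head = 20 and
 * tail = 500 the simple difference 500 - 20 = 480 applies.
 */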
static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
{
	u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
					&tx_ring->state);
	}
	/* reset the countdown */
	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);

	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;

	return false;
}

static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
		set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
		ixgbevf_service_event_schedule(adapter);
	}
}

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_tx_timeout_reset(adapter);
}

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring, int napi_budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = tx_ring->count / 2;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

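	/* Illustrative note (not in the original source): biasing i by
	 * -tx_ring->count turns the wrap test in the loop below into a
	 * cheap "if (unlikely(!i))".  For example, with count = 512 and
	 * next_to_clean = 510, i starts at -2, increments through -1 to 0
	 * at the wrap point, and is then rebased to -512 while the buffer
	 * and descriptor pointers are reset to the start of the ring.
	 */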
	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
		struct ixgbe_hw *hw = &adapter->hw;
		union ixgbe_adv_tx_desc *eop_desc;

		eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;

		pr_err("Detected Tx Unit Hang\n"
		       "  Tx Queue             <%d>\n"
		       "  TDH, TDT             <%x>, <%x>\n"
		       "  next_to_use          <%x>\n"
		       "  next_to_clean        <%x>\n"
		       "tx_buffer_info[next_to_clean]\n"
		       "  next_to_watch        <%p>\n"
		       "  eop_desc->wb.status  <%x>\n"
		       "  time_stamp           <%lx>\n"
		       "  jiffies              <%lx>\n",
		       tx_ring->queue_index,
		       IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
		       IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
		       tx_ring->next_to_use, i,
		       eop_desc, (eop_desc ? eop_desc->wb.status : 0),
		       tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbevf_tx_timeout_reset(adapter);

		return true;
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	skb_mark_napi_id(skb, &q_vector->napi);

	if (ixgbevf_qv_busy_polling(q_vector)) {
		netif_receive_skb(skb);
		/* exit early if we busy polled */
		return;
	}
#endif /* CONFIG_NET_RX_BUSY_POLL */

	napi_gro_receive(&q_vector->napi, skb);
}

#define IXGBE_RSS_L4_TYPES_MASK \
	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))

static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
				   union ixgbe_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
		   IXGBE_RXDADV_RSSTYPE_MASK;

	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

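/* Illustrative note (not in the original source): the descriptor's RSS
 * type field reports which headers the hardware hashed.  Assuming the
 * usual ixgbe encodings (e.g. IXGBE_RXDADV_RSSTYPE_IPV4_TCP == 1), a
 * TCP/IPv4 packet selects a bit inside IXGBE_RSS_L4_TYPES_MASK and is
 * reported to the stack as PKT_HASH_TYPE_L4, while an IPv4 fragment
 * hashed on addresses only falls through to PKT_HASH_TYPE_L3.
 */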
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP checksum was checked and an error was flagged */
	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/**
 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 **/
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	ixgbevf_rx_hash(rx_ring, rx_desc, skb);
	ixgbevf_rx_checksum(rx_ring, rx_desc, skb);

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		unsigned long *active_vlans = netdev_priv(rx_ring->netdev);

		if (test_bit(vid & VLAN_VID_MASK, active_vlans))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it returns true
 * indicating that this is in fact a non-EOP buffer and the caller should
 * keep chaining buffers into the in-progress sk_buff.
 **/
static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
			       union ixgbe_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));

	if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	return true;
}

static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
				      struct ixgbevf_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma = bi->dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0,
			   PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_rx_buff_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
				     u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	/* nothing to do or no valid netdev defined */
	if (!cleaned_count || !rx_ring->netdev)
		return;

	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if pkt_addr didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		ixgbevf_write_tail(rx_ring, i);
	}
}

/**
 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
				    union ixgbe_adv_rx_desc *rx_desc,
				    struct sk_buff *skb)
{
	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbevf_test_staterr(rx_desc,
					  IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff)
{
	struct ixgbevf_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->page = old_buff->page;
	new_buff->dma = old_buff->dma;
	new_buff->page_offset = old_buff->page_offset;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
					 new_buff->page_offset,
					 IXGBEVF_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}

static inline bool ixgbevf_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

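/* Illustrative note (not in the original source): a page fails this
 * reuse check either because it was allocated on a different NUMA node
 * than the one servicing the interrupt (recycling it would keep paying
 * remote-memory access costs) or because it came from a pfmemalloc
 * emergency reserve, which must flow back to the page allocator rather
 * than sit in a driver recycle ring.
 */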
/**
 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
				struct ixgbevf_rx_buffer *rx_buffer,
				union ixgbe_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned char *va = page_address(page) + rx_buffer->page_offset;
	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif
	unsigned int pull_len;

	if (unlikely(skb_is_nonlinear(skb)))
		goto add_tail_frag;

	if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as is */
		if (likely(!ixgbevf_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		put_page(page);
		return false;
	}

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	va += pull_len;
	size -= pull_len;

add_tail_frag:
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)va & ~PAGE_MASK, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(ixgbevf_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;

#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
		return false;

#endif
	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	page_ref_inc(page);

	return true;
}

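/* Illustrative worked example (not in the original source): on a 4K-page
 * system, assuming IXGBEVF_RX_BUFSZ == 2048, each page is split into two
 * half-page buffers.  "page_offset ^= IXGBEVF_RX_BUFSZ" ping-pongs the
 * offset between 0 and 2048, so while the stack holds a reference to one
 * half, the other half is handed back to hardware; the page_ref_inc()
 * above keeps the refcount balanced across the flip.
 */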
static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
					       union ixgbe_adv_rx_desc *rx_desc,
					       struct sk_buff *skb)
{
	struct ixgbevf_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IXGBEVF_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      IXGBEVF_RX_BUFSZ,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buffer->dma = 0;
	rx_buffer->page = NULL;

	return skb;
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
	struct sk_buff *skb = rx_ring->skb;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		/* retrieve a buffer from the ring */
		skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (ixgbevf_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type == PACKET_BROADCAST ||
		     skb->pkt_type == PACKET_MULTICAST) &&
		    ether_addr_equal(rx_ring->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		/* populate checksum, VLAN, and protocol */
		ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);

		ixgbevf_rx_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget, work_done = 0;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx) {
		if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
			clean_complete = false;
	}

	if (budget <= 0)
		return budget;
#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!ixgbevf_qv_lock_napi(q_vector))
		return budget;
#endif

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

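	/* Illustrative worked example (not in the original source): with a
	 * NAPI budget of 64 and three Rx rings on this vector, each ring
	 * gets max(64 / 3, 1) = 21 packets this pass; the max() keeps a
	 * vector with more rings than budget from starving a ring entirely.
	 */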
	ixgbevf_for_each_ring(ring, q_vector->rx) {
		int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
						   per_ring_budget);
		work_done += cleaned;
		if (cleaned >= per_ring_budget)
			clean_complete = false;
	}

#ifdef CONFIG_NET_RX_BUSY_POLL
	ixgbevf_qv_unlock_napi(q_vector);
#endif

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete_done(napi, work_done);
	if (adapter->rx_itr_setting == 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  BIT(q_vector->v_idx));

	return 0;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/* set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
	struct ixgbevf_q_vector *q_vector =
			container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int found = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbevf_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
		if (found)
			ring->stats.cleaned += found;
		else
			ring->stats.misses++;
#endif
		if (found)
			break;
	}

	ixgbevf_qv_unlock_poll(q_vector);

	return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;

		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* Tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_12K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* Rx or Rx/Tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= BIT(v_idx);

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = BIT(v_idx);
	adapter->eims_enable_mask |= adapter->eims_other;
}

1181
1182enum latency_range {
1183 lowest_latency = 0,
1184 low_latency = 1,
1185 bulk_latency = 2,
1186 latency_invalid = 255
1187};
1188
1189/**
1190 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001191 * @q_vector: structure containing interrupt and ring information
1192 * @ring_container: structure containing ring performance data
Greg Rose92915f72010-01-09 02:24:10 +00001193 *
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001194 * Stores a new ITR value based on packets and byte
1195 * counts during the last interrupt. The advantage of per interrupt
1196 * computation is faster updates and more accurate ITR for the current
1197 * traffic pattern. Constants in this function were computed
1198 * based on theoretical maximum wire speed and thresholds were set based
1199 * on testing data as well as attempting to minimize response time
1200 * while increasing bulk throughput.
Greg Rose92915f72010-01-09 02:24:10 +00001201 **/
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001202static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
1203 struct ixgbevf_ring_container *ring_container)
Greg Rose92915f72010-01-09 02:24:10 +00001204{
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001205 int bytes = ring_container->total_bytes;
1206 int packets = ring_container->total_packets;
Greg Rose92915f72010-01-09 02:24:10 +00001207 u32 timepassed_us;
1208 u64 bytes_perint;
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001209 u8 itr_setting = ring_container->itr;
Greg Rose92915f72010-01-09 02:24:10 +00001210
1211 if (packets == 0)
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001212 return;
Greg Rose92915f72010-01-09 02:24:10 +00001213
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001214 /* simple throttle rate management
Greg Rose92915f72010-01-09 02:24:10 +00001215 * 0-20MB/s lowest (100000 ints/s)
1216 * 20-100MB/s low (20000 ints/s)
Alexander Duyck8a9ca112015-09-29 13:11:15 -07001217 * 100-1249MB/s bulk (12000 ints/s)
Greg Rose92915f72010-01-09 02:24:10 +00001218 */
1219 /* what was last interrupt timeslice? */
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001220 timepassed_us = q_vector->itr >> 2;
Greg Rose92915f72010-01-09 02:24:10 +00001221 bytes_perint = bytes / timepassed_us; /* bytes/usec */
1222
1223 switch (itr_setting) {
1224 case lowest_latency:
Alexander Duycke2c28ce2012-05-11 08:32:34 +00001225 if (bytes_perint > 10)
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001226 itr_setting = low_latency;
Greg Rose92915f72010-01-09 02:24:10 +00001227 break;
1228 case low_latency:
Alexander Duycke2c28ce2012-05-11 08:32:34 +00001229 if (bytes_perint > 20)
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001230 itr_setting = bulk_latency;
Alexander Duycke2c28ce2012-05-11 08:32:34 +00001231 else if (bytes_perint <= 10)
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001232 itr_setting = lowest_latency;
Greg Rose92915f72010-01-09 02:24:10 +00001233 break;
1234 case bulk_latency:
Alexander Duycke2c28ce2012-05-11 08:32:34 +00001235 if (bytes_perint <= 20)
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001236 itr_setting = low_latency;
Greg Rose92915f72010-01-09 02:24:10 +00001237 break;
1238 }
1239
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001240 /* clear work counters since we have the values we need */
1241 ring_container->total_bytes = 0;
1242 ring_container->total_packets = 0;
1243
1244 /* write updated itr to ring container */
1245 ring_container->itr = itr_setting;
Greg Rose92915f72010-01-09 02:24:10 +00001246}
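
/* Note (illustrative): q_vector->itr holds the EITR interval in register
 * units, so "itr >> 2" above approximates microseconds between interrupts.
 * Assuming the ixgbevf.h value IXGBE_20K_ITR == 200, the timeslice is
 * ~50 us; 2500 bytes received in that window gives bytes_perint ==
 * 2500 / 50 == 50 > 20, so a low_latency ring is promoted to bulk_latency.
 * A quiet ring (packets == 0) keeps its previous setting via the early
 * return.
 */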
1247
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001248static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
Greg Rose92915f72010-01-09 02:24:10 +00001249{
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001250 u32 new_itr = q_vector->itr;
1251 u8 current_itr;
Greg Rose92915f72010-01-09 02:24:10 +00001252
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001253 ixgbevf_update_itr(q_vector, &q_vector->tx);
1254 ixgbevf_update_itr(q_vector, &q_vector->rx);
Greg Rose92915f72010-01-09 02:24:10 +00001255
Alexander Duyck6b43c442012-05-11 08:32:45 +00001256 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
Greg Rose92915f72010-01-09 02:24:10 +00001257
1258 switch (current_itr) {
1259 /* counts and packets in update_itr are dependent on these numbers */
1260 case lowest_latency:
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001261 new_itr = IXGBE_100K_ITR;
Greg Rose92915f72010-01-09 02:24:10 +00001262 break;
1263 case low_latency:
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001264 new_itr = IXGBE_20K_ITR;
Greg Rose92915f72010-01-09 02:24:10 +00001265 break;
1266 case bulk_latency:
Alexander Duyck8a9ca112015-09-29 13:11:15 -07001267 new_itr = IXGBE_12K_ITR;
Greg Rose92915f72010-01-09 02:24:10 +00001268 break;
Emil Tantilov9ad3d6f2015-11-04 16:02:21 -08001269 default:
1270 break;
Greg Rose92915f72010-01-09 02:24:10 +00001271 }
1272
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001273 if (new_itr != q_vector->itr) {
Greg Rose92915f72010-01-09 02:24:10 +00001274 /* do an exponential smoothing */
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001275 new_itr = (10 * new_itr * q_vector->itr) /
1276 ((9 * new_itr) + q_vector->itr);
1277
1278 /* save the algorithm value here */
1279 q_vector->itr = new_itr;
1280
1281 ixgbevf_write_eitr(q_vector);
Greg Rose92915f72010-01-09 02:24:10 +00001282 }
Greg Rose92915f72010-01-09 02:24:10 +00001283}
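
/* Note (illustrative): the smoothing above damps ITR changes instead of
 * jumping straight to the target. Assuming the ixgbevf.h values
 * IXGBE_20K_ITR == 200 and IXGBE_100K_ITR == 40, a step from 200 toward
 * 40 yields new_itr = (10 * 40 * 200) / ((9 * 40) + 200) = 80000 / 560
 * = 142, so the vector converges over several interrupts rather than
 * oscillating between latency classes.
 */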
1284
Alexander Duyck4b2cd272012-08-02 01:16:59 +00001285static irqreturn_t ixgbevf_msix_other(int irq, void *data)
Greg Rose92915f72010-01-09 02:24:10 +00001286{
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001287 struct ixgbevf_adapter *adapter = data;
Greg Rose92915f72010-01-09 02:24:10 +00001288 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose92915f72010-01-09 02:24:10 +00001289
Alexander Duyck4b2cd272012-08-02 01:16:59 +00001290 hw->mac.get_link_status = 1;
Greg Rose375b27c2012-01-18 22:13:31 +00001291
Emil Tantilov9ac5c5c2015-01-28 03:21:34 +00001292 ixgbevf_service_event_schedule(adapter);
Greg Rose3a2c4032012-02-01 01:28:15 +00001293
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001294 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
1295
Greg Rose92915f72010-01-09 02:24:10 +00001296 return IRQ_HANDLED;
1297}
1298
Greg Rose92915f72010-01-09 02:24:10 +00001299/**
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001300 * ixgbevf_msix_clean_rings - clean Rx/Tx rings on a single MSI-X vector
Greg Rose92915f72010-01-09 02:24:10 +00001301 * @irq: unused
1302 * @data: pointer to our q_vector struct for this interrupt vector
1303 **/
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001304static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
Greg Rose92915f72010-01-09 02:24:10 +00001305{
1306 struct ixgbevf_q_vector *q_vector = data;
Greg Rose92915f72010-01-09 02:24:10 +00001307
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001308 /* EIAM disabled interrupts (on this vector) for us */
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001309 if (q_vector->rx.ring || q_vector->tx.ring)
Alexander Duyckef2662b2015-09-29 15:19:43 -07001310 napi_schedule_irqoff(&q_vector->napi);
Greg Rose92915f72010-01-09 02:24:10 +00001311
1312 return IRQ_HANDLED;
1313}
1314
1315static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
1316 int r_idx)
1317{
1318 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1319
Don Skidmore87e70ab2014-01-16 02:30:08 -08001320 a->rx_ring[r_idx]->next = q_vector->rx.ring;
1321 q_vector->rx.ring = a->rx_ring[r_idx];
Alexander Duyck6b43c442012-05-11 08:32:45 +00001322 q_vector->rx.count++;
Greg Rose92915f72010-01-09 02:24:10 +00001323}
1324
1325static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
1326 int t_idx)
1327{
1328 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1329
Don Skidmore87e70ab2014-01-16 02:30:08 -08001330 a->tx_ring[t_idx]->next = q_vector->tx.ring;
1331 q_vector->tx.ring = a->tx_ring[t_idx];
Alexander Duyck6b43c442012-05-11 08:32:45 +00001332 q_vector->tx.count++;
Greg Rose92915f72010-01-09 02:24:10 +00001333}
1334
1335/**
1336 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
1337 * @adapter: board private structure to initialize
1338 *
1339 * This function maps descriptor rings to the queue-specific vectors
1340 * we were allotted through the MSI-X enabling code. Ideally, we'd have
1341 * one vector per ring/queue, but on a constrained vector budget, we
1342 * group the rings as "efficiently" as possible. You would add new
1343 * mapping configurations in here.
1344 **/
1345static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
1346{
1347 int q_vectors;
1348 int v_start = 0;
1349 int rxr_idx = 0, txr_idx = 0;
1350 int rxr_remaining = adapter->num_rx_queues;
1351 int txr_remaining = adapter->num_tx_queues;
1352 int i, j;
1353 int rqpv, tqpv;
Greg Rose92915f72010-01-09 02:24:10 +00001354
1355 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1356
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001357 /* The ideal configuration...
Greg Rose92915f72010-01-09 02:24:10 +00001358 * We have enough vectors to map one per queue.
1359 */
1360 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
1361 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1362 map_vector_to_rxq(adapter, v_start, rxr_idx);
1363
1364 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
1365 map_vector_to_txq(adapter, v_start, txr_idx);
Mark Rustad50985b52015-10-21 17:21:20 -07001366 return 0;
Greg Rose92915f72010-01-09 02:24:10 +00001367 }
1368
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001369 /* If we don't have enough vectors for a 1-to-1
Greg Rose92915f72010-01-09 02:24:10 +00001370 * mapping, we'll have to group them so there are
1371 * multiple queues per vector.
1372 */
1373 /* Re-adjusting *qpv takes care of the remainder. */
1374 for (i = v_start; i < q_vectors; i++) {
1375 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
1376 for (j = 0; j < rqpv; j++) {
1377 map_vector_to_rxq(adapter, i, rxr_idx);
1378 rxr_idx++;
1379 rxr_remaining--;
1380 }
1381 }
1382 for (i = v_start; i < q_vectors; i++) {
1383 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
1384 for (j = 0; j < tqpv; j++) {
1385 map_vector_to_txq(adapter, i, txr_idx);
1386 txr_idx++;
1387 txr_remaining--;
1388 }
1389 }
1390
Mark Rustad50985b52015-10-21 17:21:20 -07001391 return 0;
Greg Rose92915f72010-01-09 02:24:10 +00001392}
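
/* Note (illustrative): the DIV_ROUND_UP() loops above spread leftover
 * rings as evenly as possible. For example, 4 Rx rings on 3 q_vectors:
 *   i=0: rqpv = DIV_ROUND_UP(4, 3) = 2 -> rings 0, 1
 *   i=1: rqpv = DIV_ROUND_UP(2, 2) = 1 -> ring 2
 *   i=2: rqpv = DIV_ROUND_UP(1, 1) = 1 -> ring 3
 * so no vector carries more than one ring beyond its fair share.
 */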
1393
1394/**
1395 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1396 * @adapter: board private structure
1397 *
1398 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1399 * interrupts from the kernel.
1400 **/
1401static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1402{
1403 struct net_device *netdev = adapter->netdev;
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001404 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1405 int vector, err;
Greg Rose92915f72010-01-09 02:24:10 +00001406 int ri = 0, ti = 0;
1407
Greg Rose92915f72010-01-09 02:24:10 +00001408 for (vector = 0; vector < q_vectors; vector++) {
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001409 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1410 struct msix_entry *entry = &adapter->msix_entries[vector];
Greg Rose92915f72010-01-09 02:24:10 +00001411
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001412 if (q_vector->tx.ring && q_vector->rx.ring) {
1413 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1414 "%s-%s-%d", netdev->name, "TxRx", ri++);
1415 ti++;
1416 } else if (q_vector->rx.ring) {
1417 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1418 "%s-%s-%d", netdev->name, "rx", ri++);
1419 } else if (q_vector->tx.ring) {
1420 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1421 "%s-%s-%d", netdev->name, "tx", ti++);
Greg Rose92915f72010-01-09 02:24:10 +00001422 } else {
1423 /* skip this unused q_vector */
1424 continue;
1425 }
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001426 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1427 q_vector->name, q_vector);
Greg Rose92915f72010-01-09 02:24:10 +00001428 if (err) {
1429 hw_dbg(&adapter->hw,
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001430	       "request_irq failed for MSI-X interrupt, Error: %d\n",
1431 err);
Greg Rose92915f72010-01-09 02:24:10 +00001432 goto free_queue_irqs;
1433 }
1434 }
1435
Greg Rose92915f72010-01-09 02:24:10 +00001436 err = request_irq(adapter->msix_entries[vector].vector,
Alexander Duyck4b2cd272012-08-02 01:16:59 +00001437 &ixgbevf_msix_other, 0, netdev->name, adapter);
Greg Rose92915f72010-01-09 02:24:10 +00001438 if (err) {
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001439 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
1440 err);
Greg Rose92915f72010-01-09 02:24:10 +00001441 goto free_queue_irqs;
1442 }
1443
1444 return 0;
1445
1446free_queue_irqs:
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001447 while (vector) {
1448 vector--;
1449 free_irq(adapter->msix_entries[vector].vector,
1450 adapter->q_vector[vector]);
1451 }
xunleera1f6c6b2013-03-05 07:44:20 +00001452 /* This failure is non-recoverable - it indicates the system is
1453 * out of MSIX vector resources and the VF driver cannot run
 1454	 * without them. Set the number of MSI-X vectors to zero
1455 * indicating that not enough can be allocated. The error
1456 * will be returned to the user indicating device open failed.
1457 * Any further attempts to force the driver to open will also
1458 * fail. The only way to recover is to unload the driver and
1459 * reload it again. If the system has recovered some MSIX
1460 * vectors then it may succeed.
1461 */
1462 adapter->num_msix_vectors = 0;
Greg Rose92915f72010-01-09 02:24:10 +00001463 return err;
1464}
1465
1466static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
1467{
1468 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1469
1470 for (i = 0; i < q_vectors; i++) {
1471 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001472
Alexander Duyck6b43c442012-05-11 08:32:45 +00001473 q_vector->rx.ring = NULL;
1474 q_vector->tx.ring = NULL;
1475 q_vector->rx.count = 0;
1476 q_vector->tx.count = 0;
Greg Rose92915f72010-01-09 02:24:10 +00001477 }
1478}
1479
1480/**
1481 * ixgbevf_request_irq - initialize interrupts
1482 * @adapter: board private structure
1483 *
1484 * Attempts to configure interrupts using the best available
1485 * capabilities of the hardware and kernel.
1486 **/
1487static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1488{
Mark Rustad50985b52015-10-21 17:21:20 -07001489 int err = ixgbevf_request_msix_irqs(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00001490
1491 if (err)
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001492 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
Greg Rose92915f72010-01-09 02:24:10 +00001493
1494 return err;
1495}
1496
1497static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1498{
Greg Rose92915f72010-01-09 02:24:10 +00001499 int i, q_vectors;
1500
Mark Rustadeeffcee2016-10-28 10:46:39 -07001501 if (!adapter->msix_entries)
1502 return;
1503
Greg Rose92915f72010-01-09 02:24:10 +00001504 q_vectors = adapter->num_msix_vectors;
Greg Rose92915f72010-01-09 02:24:10 +00001505 i = q_vectors - 1;
1506
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001507 free_irq(adapter->msix_entries[i].vector, adapter);
Greg Rose92915f72010-01-09 02:24:10 +00001508 i--;
1509
1510 for (; i >= 0; i--) {
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001511 /* free only the irqs that were actually requested */
1512 if (!adapter->q_vector[i]->rx.ring &&
1513 !adapter->q_vector[i]->tx.ring)
1514 continue;
1515
Greg Rose92915f72010-01-09 02:24:10 +00001516 free_irq(adapter->msix_entries[i].vector,
1517 adapter->q_vector[i]);
1518 }
1519
1520 ixgbevf_reset_q_vectors(adapter);
1521}
1522
1523/**
1524 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1525 * @adapter: board private structure
1526 **/
1527static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1528{
Greg Rose92915f72010-01-09 02:24:10 +00001529 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001530 int i;
Greg Rose92915f72010-01-09 02:24:10 +00001531
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001532 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
Greg Rose92915f72010-01-09 02:24:10 +00001533 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001534 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
Greg Rose92915f72010-01-09 02:24:10 +00001535
1536 IXGBE_WRITE_FLUSH(hw);
1537
1538 for (i = 0; i < adapter->num_msix_vectors; i++)
1539 synchronize_irq(adapter->msix_entries[i].vector);
1540}
1541
1542/**
1543 * ixgbevf_irq_enable - Enable default interrupt generation settings
1544 * @adapter: board private structure
1545 **/
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001546static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
Greg Rose92915f72010-01-09 02:24:10 +00001547{
1548 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose92915f72010-01-09 02:24:10 +00001549
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001550 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1551 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1552 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
Greg Rose92915f72010-01-09 02:24:10 +00001553}
1554
1555/**
Don Skidmorede02dec2014-01-16 02:30:09 -08001556 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1557 * @adapter: board private structure
1558 * @ring: structure containing ring specific data
1559 *
1560 * Configure the Tx descriptor ring after a reset.
1561 **/
1562static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1563 struct ixgbevf_ring *ring)
1564{
1565 struct ixgbe_hw *hw = &adapter->hw;
1566 u64 tdba = ring->dma;
1567 int wait_loop = 10;
1568 u32 txdctl = IXGBE_TXDCTL_ENABLE;
1569 u8 reg_idx = ring->reg_idx;
1570
1571 /* disable queue to avoid issues while updating state */
1572 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1573 IXGBE_WRITE_FLUSH(hw);
1574
1575 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1576 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1577 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1578 ring->count * sizeof(union ixgbe_adv_tx_desc));
1579
1580 /* disable head writeback */
1581 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1582 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1583
1584 /* enable relaxed ordering */
1585 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1586 (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1587 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1588
1589 /* reset head and tail pointers */
1590 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1591 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
Mark Rustaddbf8b0d2014-03-04 03:02:34 +00001592 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
Don Skidmorede02dec2014-01-16 02:30:09 -08001593
 1594	/* reset ntu and ntc to place SW in sync with hardware */
1595 ring->next_to_clean = 0;
1596 ring->next_to_use = 0;
1597
 1598	/* To avoid issues, WTHRESH + PTHRESH should always be less than
 1599	 * or equal to the number of on-chip descriptors, which is
 1600	 * currently 40.
 1601	 */
1602 txdctl |= (8 << 16); /* WTHRESH = 8 */
1603
 1604	/* Setting PTHRESH to 32 improves performance */
Jacob Keller8d055cc2016-04-13 16:08:24 -07001605 txdctl |= (1u << 8) | /* HTHRESH = 1 */
1606 32; /* PTHRESH = 32 */
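
	/* Note (illustrative): TXDCTL packs PTHRESH in bits 6:0, HTHRESH
	 * in bits 14:8 and WTHRESH in bits 22:16, so the writes above
	 * compose to (8 << 16) | (1 << 8) | 32 = 0x00080120, OR'd with
	 * IXGBE_TXDCTL_ENABLE before the register write below.
	 */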
Don Skidmorede02dec2014-01-16 02:30:09 -08001607
Emil Tantilove08400b2015-01-28 03:21:24 +00001608 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
1609
Don Skidmorede02dec2014-01-16 02:30:09 -08001610 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1611
1612 /* poll to verify queue is enabled */
1613 do {
1614 usleep_range(1000, 2000);
1615 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1616 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1617 if (!wait_loop)
Emil Tantilovee950532016-07-29 10:30:16 -07001618 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
Don Skidmorede02dec2014-01-16 02:30:09 -08001619}
1620
1621/**
Greg Rose92915f72010-01-09 02:24:10 +00001622 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1623 * @adapter: board private structure
1624 *
1625 * Configure the Tx unit of the MAC after a reset.
1626 **/
1627static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1628{
Don Skidmorede02dec2014-01-16 02:30:09 -08001629 u32 i;
Greg Rose92915f72010-01-09 02:24:10 +00001630
1631 /* Setup the HW Tx Head and Tail descriptor pointers */
Don Skidmorede02dec2014-01-16 02:30:09 -08001632 for (i = 0; i < adapter->num_tx_queues; i++)
1633 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
Greg Rose92915f72010-01-09 02:24:10 +00001634}
1635
1636#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1637
1638static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1639{
Greg Rose92915f72010-01-09 02:24:10 +00001640 struct ixgbe_hw *hw = &adapter->hw;
1641 u32 srrctl;
1642
Greg Rose92915f72010-01-09 02:24:10 +00001643 srrctl = IXGBE_SRRCTL_DROP_EN;
1644
Emil Tantilovbad17232014-11-21 02:57:15 +00001645 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1646 srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck77d5dfc2012-05-11 08:32:19 +00001647 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
Greg Rose92915f72010-01-09 02:24:10 +00001648
Greg Rose92915f72010-01-09 02:24:10 +00001649 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1650}
1651
Don Skidmore1bb9c632013-09-21 01:57:33 +00001652static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1653{
1654 struct ixgbe_hw *hw = &adapter->hw;
1655
1656 /* PSRTYPE must be initialized in 82599 */
1657 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1658 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1659 IXGBE_PSRTYPE_L2HDR;
1660
1661 if (adapter->num_rx_queues > 1)
Jacob Keller8d055cc2016-04-13 16:08:24 -07001662 psrtype |= BIT(29);
Don Skidmore1bb9c632013-09-21 01:57:33 +00001663
1664 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1665}
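
/* Note (illustrative): BIT(29) falls in the RQPL field of VFPSRTYPE,
 * which tells the hardware to spread received flows across 2^1 = 2
 * queues; with a single Rx queue the field is left at zero.
 */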
1666
Don Skidmorede02dec2014-01-16 02:30:09 -08001667#define IXGBEVF_MAX_RX_DESC_POLL 10
1668static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1669 struct ixgbevf_ring *ring)
1670{
1671 struct ixgbe_hw *hw = &adapter->hw;
1672 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1673 u32 rxdctl;
1674 u8 reg_idx = ring->reg_idx;
1675
Mark Rustad26597802014-03-04 03:02:45 +00001676 if (IXGBE_REMOVED(hw->hw_addr))
1677 return;
Don Skidmorede02dec2014-01-16 02:30:09 -08001678 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1679 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1680
1681 /* write value back with RXDCTL.ENABLE bit cleared */
1682 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1683
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001684 /* the hardware may take up to 100us to really disable the Rx queue */
Don Skidmorede02dec2014-01-16 02:30:09 -08001685 do {
1686 udelay(10);
1687 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1688 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1689
1690 if (!wait_loop)
1691 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1692 reg_idx);
1693}
1694
1695static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1696 struct ixgbevf_ring *ring)
1697{
1698 struct ixgbe_hw *hw = &adapter->hw;
1699 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1700 u32 rxdctl;
1701 u8 reg_idx = ring->reg_idx;
1702
Mark Rustad26597802014-03-04 03:02:45 +00001703 if (IXGBE_REMOVED(hw->hw_addr))
1704 return;
Don Skidmorede02dec2014-01-16 02:30:09 -08001705 do {
1706 usleep_range(1000, 2000);
1707 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1708 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1709
1710 if (!wait_loop)
1711 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1712 reg_idx);
1713}
1714
Emil Tantilov9295edb2014-12-06 09:19:09 +00001715static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1716{
1717 struct ixgbe_hw *hw = &adapter->hw;
1718 u32 vfmrqc = 0, vfreta = 0;
Emil Tantilov9295edb2014-12-06 09:19:09 +00001719 u16 rss_i = adapter->num_rx_queues;
Emil Tantilov9cba4342015-04-30 11:50:55 -07001720 u8 i, j;
Emil Tantilov9295edb2014-12-06 09:19:09 +00001721
1722 /* Fill out hash function seeds */
Emil Tantilov9cba4342015-04-30 11:50:55 -07001723 netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
1724 for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
1725 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);
Emil Tantilov9295edb2014-12-06 09:19:09 +00001726
Emil Tantilov9cba4342015-04-30 11:50:55 -07001727 for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
Emil Tantilov9295edb2014-12-06 09:19:09 +00001728 if (j == rss_i)
1729 j = 0;
Emil Tantilov9cba4342015-04-30 11:50:55 -07001730
1731 adapter->rss_indir_tbl[i] = j;
1732
1733 vfreta |= j << (i & 0x3) * 8;
1734 if ((i & 3) == 3) {
Emil Tantilov9295edb2014-12-06 09:19:09 +00001735 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
Emil Tantilov9cba4342015-04-30 11:50:55 -07001736 vfreta = 0;
1737 }
Emil Tantilov9295edb2014-12-06 09:19:09 +00001738 }
1739
1740 /* Perform hash on these packet types */
1741 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1742 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1743 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1744 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1745
1746 vfmrqc |= IXGBE_VFMRQC_RSSEN;
1747
1748 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1749}
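
/* Note (illustrative): VFRETA holds four 8-bit redirection entries per
 * 32-bit register. Since "*" binds tighter than "<<", the expression
 * above parses as j << ((i & 0x3) * 8), placing entry i in byte (i % 4)
 * and flushing a register every fourth entry. With rss_i == 2 the
 * 64-entry table simply alternates 0, 1, 0, 1, ...
 */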
1750
Don Skidmorede02dec2014-01-16 02:30:09 -08001751static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1752 struct ixgbevf_ring *ring)
1753{
1754 struct ixgbe_hw *hw = &adapter->hw;
1755 u64 rdba = ring->dma;
1756 u32 rxdctl;
1757 u8 reg_idx = ring->reg_idx;
1758
1759 /* disable queue to avoid issues while updating state */
1760 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1761 ixgbevf_disable_rx_queue(adapter, ring);
1762
1763 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1764 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1765 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1766 ring->count * sizeof(union ixgbe_adv_rx_desc));
1767
Babu Moger33b0eb12016-04-21 15:56:49 -07001768#ifndef CONFIG_SPARC
Don Skidmorede02dec2014-01-16 02:30:09 -08001769 /* enable relaxed ordering */
1770 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1771 IXGBE_DCA_RXCTRL_DESC_RRO_EN);
Babu Moger33b0eb12016-04-21 15:56:49 -07001772#else
1773 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1774 IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1775 IXGBE_DCA_RXCTRL_DATA_WRO_EN);
1776#endif
Don Skidmorede02dec2014-01-16 02:30:09 -08001777
1778 /* reset head and tail pointers */
1779 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1780 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
Mark Rustaddbf8b0d2014-03-04 03:02:34 +00001781 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
Don Skidmorede02dec2014-01-16 02:30:09 -08001782
1783 /* reset ntu and ntc to place SW in sync with hardwdare */
1784 ring->next_to_clean = 0;
1785 ring->next_to_use = 0;
Emil Tantilovbad17232014-11-21 02:57:15 +00001786 ring->next_to_alloc = 0;
Don Skidmorede02dec2014-01-16 02:30:09 -08001787
1788 ixgbevf_configure_srrctl(adapter, reg_idx);
1789
Emil Tantilovbad17232014-11-21 02:57:15 +00001790 /* allow any size packet since we can handle overflow */
1791 rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
1792
Don Skidmorede02dec2014-01-16 02:30:09 -08001793 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1794 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1795
1796 ixgbevf_rx_desc_queue_enable(adapter, ring);
Emil Tantilov095e2612014-01-17 18:30:00 -08001797 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
Don Skidmorede02dec2014-01-16 02:30:09 -08001798}
1799
Greg Rose92915f72010-01-09 02:24:10 +00001800/**
1801 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1802 * @adapter: board private structure
1803 *
1804 * Configure the Rx unit of the MAC after a reset.
1805 **/
1806static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1807{
Emil Tantilovbad17232014-11-21 02:57:15 +00001808 struct ixgbe_hw *hw = &adapter->hw;
1809 struct net_device *netdev = adapter->netdev;
Tony Nguyen6a11e522016-07-13 10:33:16 -07001810 int i, ret;
Greg Rose92915f72010-01-09 02:24:10 +00001811
Don Skidmore1bb9c632013-09-21 01:57:33 +00001812 ixgbevf_setup_psrtype(adapter);
Emil Tantilov9295edb2014-12-06 09:19:09 +00001813 if (hw->mac.type >= ixgbe_mac_X550_vf)
1814 ixgbevf_setup_vfmrqc(adapter);
Alexander Duyckdd1fe112012-07-20 08:09:48 +00001815
Emil Tantilov14b22cd2016-08-29 16:39:28 -07001816 spin_lock_bh(&adapter->mbx_lock);
Emil Tantilovbad17232014-11-21 02:57:15 +00001817 /* notify the PF of our intent to use this size of frame */
Tony Nguyen6a11e522016-07-13 10:33:16 -07001818 ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
Emil Tantilov14b22cd2016-08-29 16:39:28 -07001819 spin_unlock_bh(&adapter->mbx_lock);
Tony Nguyen6a11e522016-07-13 10:33:16 -07001820 if (ret)
1821 dev_err(&adapter->pdev->dev,
1822 "Failed to set MTU at %d\n", netdev->mtu);
Greg Rose92915f72010-01-09 02:24:10 +00001823
Greg Rose92915f72010-01-09 02:24:10 +00001824 /* Setup the HW Rx Head and Tail Descriptor Pointers and
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001825 * the Base and Length of the Rx Descriptor Ring
1826 */
Don Skidmorede02dec2014-01-16 02:30:09 -08001827 for (i = 0; i < adapter->num_rx_queues; i++)
1828 ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
Greg Rose92915f72010-01-09 02:24:10 +00001829}
1830
Patrick McHardy80d5c362013-04-19 02:04:28 +00001831static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1832 __be16 proto, u16 vid)
Greg Rose92915f72010-01-09 02:24:10 +00001833{
1834 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1835 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001836 int err;
1837
John Fastabend55fdd45b2012-10-01 14:52:20 +00001838 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001839
Greg Rose92915f72010-01-09 02:24:10 +00001840 /* add VID to filter table */
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001841 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001842
John Fastabend55fdd45b2012-10-01 14:52:20 +00001843 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001844
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001845 /* translate error return types so error makes sense */
1846 if (err == IXGBE_ERR_MBX)
1847 return -EIO;
1848
1849 if (err == IXGBE_ERR_INVALID_ARGUMENT)
1850 return -EACCES;
1851
Jiri Pirkodadcd652011-07-21 03:25:09 +00001852 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05001853
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001854 return err;
Greg Rose92915f72010-01-09 02:24:10 +00001855}
1856
Patrick McHardy80d5c362013-04-19 02:04:28 +00001857static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1858 __be16 proto, u16 vid)
Greg Rose92915f72010-01-09 02:24:10 +00001859{
1860 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1861 struct ixgbe_hw *hw = &adapter->hw;
Mark Rustad50985b52015-10-21 17:21:20 -07001862 int err;
Greg Rose92915f72010-01-09 02:24:10 +00001863
John Fastabend55fdd45b2012-10-01 14:52:20 +00001864 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001865
Greg Rose92915f72010-01-09 02:24:10 +00001866 /* remove VID from filter table */
Greg Rose92fe0bf2012-11-02 05:50:47 +00001867 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001868
John Fastabend55fdd45b2012-10-01 14:52:20 +00001869 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001870
Jiri Pirkodadcd652011-07-21 03:25:09 +00001871 clear_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05001872
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001873 return err;
Greg Rose92915f72010-01-09 02:24:10 +00001874}
1875
1876static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1877{
Jiri Pirkodadcd652011-07-21 03:25:09 +00001878 u16 vid;
Greg Rose92915f72010-01-09 02:24:10 +00001879
Jiri Pirkodadcd652011-07-21 03:25:09 +00001880 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
Patrick McHardy80d5c362013-04-19 02:04:28 +00001881 ixgbevf_vlan_rx_add_vid(adapter->netdev,
1882 htons(ETH_P_8021Q), vid);
Greg Rose92915f72010-01-09 02:24:10 +00001883}
1884
Greg Rose46ec20f2011-05-13 01:33:42 +00001885static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1886{
1887 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1888 struct ixgbe_hw *hw = &adapter->hw;
1889 int count = 0;
1890
 1891	if (netdev_uc_count(netdev) > 10) {
Jeff Kirsherdbd96362011-10-21 19:38:18 +00001892 pr_err("Too many unicast filters - No Space\n");
Greg Rose46ec20f2011-05-13 01:33:42 +00001893 return -ENOSPC;
1894 }
1895
1896 if (!netdev_uc_empty(netdev)) {
1897 struct netdev_hw_addr *ha;
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001898
Greg Rose46ec20f2011-05-13 01:33:42 +00001899 netdev_for_each_uc_addr(ha, netdev) {
1900 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1901 udelay(200);
1902 }
1903 } else {
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00001904 /* If the list is empty then send message to PF driver to
1905 * clear all MAC VLANs on this VF.
Greg Rose46ec20f2011-05-13 01:33:42 +00001906 */
1907 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1908 }
1909
1910 return count;
1911}
1912
Greg Rose92915f72010-01-09 02:24:10 +00001913/**
Greg Rosedee847f2012-11-02 05:50:57 +00001914 * ixgbevf_set_rx_mode - Multicast and unicast set
Greg Rose92915f72010-01-09 02:24:10 +00001915 * @netdev: network interface device structure
1916 *
1917 * The set_rx_method entry point is called whenever the multicast address
Greg Rosedee847f2012-11-02 05:50:57 +00001918 * list, unicast address list or the network interface flags are updated.
1919 * This routine is responsible for configuring the hardware for proper
1920 * multicast mode and configuring requested unicast filters.
Greg Rose92915f72010-01-09 02:24:10 +00001921 **/
1922static void ixgbevf_set_rx_mode(struct net_device *netdev)
1923{
1924 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1925 struct ixgbe_hw *hw = &adapter->hw;
Hiroshi Shimamoto8443c1a42015-08-28 06:59:03 +00001926 unsigned int flags = netdev->flags;
1927 int xcast_mode;
1928
1929 xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
1930 (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
1931 IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
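
	/* Note (illustrative): the chained conditional above maps netdev
	 * flags to the mailbox request: IFF_ALLMULTI selects ALLMULTI,
	 * otherwise IFF_BROADCAST or IFF_MULTICAST selects MULTI, and a
	 * device with neither flag falls back to NONE.
	 */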
Greg Rose92915f72010-01-09 02:24:10 +00001932
John Fastabend55fdd45b2012-10-01 14:52:20 +00001933 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001934
Tony Nguyen8b44a8a2016-04-27 14:14:14 -07001935 hw->mac.ops.update_xcast_mode(hw, xcast_mode);
Hiroshi Shimamoto8443c1a42015-08-28 06:59:03 +00001936
Greg Rose92915f72010-01-09 02:24:10 +00001937 /* reprogram multicast list */
Greg Rose92fe0bf2012-11-02 05:50:47 +00001938 hw->mac.ops.update_mc_addr_list(hw, netdev);
Greg Rose46ec20f2011-05-13 01:33:42 +00001939
1940 ixgbevf_write_uc_addr_list(netdev);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001941
John Fastabend55fdd45b2012-10-01 14:52:20 +00001942 spin_unlock_bh(&adapter->mbx_lock);
Greg Rose92915f72010-01-09 02:24:10 +00001943}
1944
1945static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1946{
1947 int q_idx;
1948 struct ixgbevf_q_vector *q_vector;
1949 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1950
1951 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
Greg Rose92915f72010-01-09 02:24:10 +00001952 q_vector = adapter->q_vector[q_idx];
Jacob Kellerc777cdf2013-09-21 06:24:20 +00001953#ifdef CONFIG_NET_RX_BUSY_POLL
1954 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
1955#endif
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001956 napi_enable(&q_vector->napi);
Greg Rose92915f72010-01-09 02:24:10 +00001957 }
1958}
1959
1960static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1961{
1962 int q_idx;
1963 struct ixgbevf_q_vector *q_vector;
1964 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1965
1966 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1967 q_vector = adapter->q_vector[q_idx];
Greg Rose92915f72010-01-09 02:24:10 +00001968 napi_disable(&q_vector->napi);
Jacob Kellerc777cdf2013-09-21 06:24:20 +00001969#ifdef CONFIG_NET_RX_BUSY_POLL
1970 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
1971 pr_info("QV %d locked\n", q_idx);
1972 usleep_range(1000, 20000);
1973 }
1974#endif /* CONFIG_NET_RX_BUSY_POLL */
Greg Rose92915f72010-01-09 02:24:10 +00001975 }
1976}
1977
Don Skidmore220fe052013-09-21 01:40:49 +00001978static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1979{
1980 struct ixgbe_hw *hw = &adapter->hw;
1981 unsigned int def_q = 0;
1982 unsigned int num_tcs = 0;
Emil Tantilov2dc571a2014-12-06 09:19:02 +00001983 unsigned int num_rx_queues = adapter->num_rx_queues;
1984 unsigned int num_tx_queues = adapter->num_tx_queues;
Don Skidmore220fe052013-09-21 01:40:49 +00001985 int err;
1986
1987 spin_lock_bh(&adapter->mbx_lock);
1988
1989 /* fetch queue configuration from the PF */
1990 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1991
1992 spin_unlock_bh(&adapter->mbx_lock);
1993
1994 if (err)
1995 return err;
1996
1997 if (num_tcs > 1) {
Emil Tantilov2dc571a2014-12-06 09:19:02 +00001998 /* we need only one Tx queue */
1999 num_tx_queues = 1;
2000
Don Skidmore220fe052013-09-21 01:40:49 +00002001 /* update default Tx ring register index */
Don Skidmore87e70ab2014-01-16 02:30:08 -08002002 adapter->tx_ring[0]->reg_idx = def_q;
Don Skidmore220fe052013-09-21 01:40:49 +00002003
2004 /* we need as many queues as traffic classes */
2005 num_rx_queues = num_tcs;
2006 }
2007
2008 /* if we have a bad config abort request queue reset */
Emil Tantilov2dc571a2014-12-06 09:19:02 +00002009 if ((adapter->num_rx_queues != num_rx_queues) ||
2010 (adapter->num_tx_queues != num_tx_queues)) {
Don Skidmore220fe052013-09-21 01:40:49 +00002011 /* force mailbox timeout to prevent further messages */
2012 hw->mbx.timeout = 0;
2013
2014 /* wait for watchdog to come around and bail us out */
Emil Tantilovd5dd7c32015-12-17 17:32:55 -08002015 set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
Don Skidmore220fe052013-09-21 01:40:49 +00002016 }
2017
2018 return 0;
2019}
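
/* Note (illustrative): if the PF reports num_tcs = 4 while the VF is
 * running with 2 Rx and 2 Tx queues, the logic above asks for 1 Tx and
 * 4 Rx queues, detects the mismatch, times out the mailbox and sets
 * __IXGBEVF_QUEUE_RESET_REQUESTED so the service task rebuilds the rings.
 */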
2020
Greg Rose92915f72010-01-09 02:24:10 +00002021static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
2022{
Don Skidmore220fe052013-09-21 01:40:49 +00002023 ixgbevf_configure_dcb(adapter);
2024
Don Skidmorede02dec2014-01-16 02:30:09 -08002025 ixgbevf_set_rx_mode(adapter->netdev);
Greg Rose92915f72010-01-09 02:24:10 +00002026
2027 ixgbevf_restore_vlan(adapter);
2028
2029 ixgbevf_configure_tx(adapter);
2030 ixgbevf_configure_rx(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00002031}
2032
Greg Rose33bd9f62010-03-19 02:59:52 +00002033static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
2034{
2035 /* Only save pre-reset stats if there are some */
2036 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
2037 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
2038 adapter->stats.base_vfgprc;
2039 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
2040 adapter->stats.base_vfgptc;
2041 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
2042 adapter->stats.base_vfgorc;
2043 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
2044 adapter->stats.base_vfgotc;
2045 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
2046 adapter->stats.base_vfmprc;
2047 }
2048}
2049
2050static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2051{
2052 struct ixgbe_hw *hw = &adapter->hw;
2053
2054 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2055 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2056 adapter->stats.last_vfgorc |=
2057 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2058 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2059 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2060 adapter->stats.last_vfgotc |=
2061 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2062 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2063
2064 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2065 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2066 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2067 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2068 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2069}
2070
Alexander Duyck31186782012-07-20 08:09:58 +00002071static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
2072{
2073 struct ixgbe_hw *hw = &adapter->hw;
Vlad Zolotarov94cf66f2015-03-30 21:35:26 +03002074 int api[] = { ixgbe_mbox_api_12,
2075 ixgbe_mbox_api_11,
Alexander Duyck56e94092012-07-20 08:10:03 +00002076 ixgbe_mbox_api_10,
Alexander Duyck31186782012-07-20 08:09:58 +00002077 ixgbe_mbox_api_unknown };
Mark Rustad50985b52015-10-21 17:21:20 -07002078 int err, idx = 0;
Alexander Duyck31186782012-07-20 08:09:58 +00002079
John Fastabend55fdd45b2012-10-01 14:52:20 +00002080 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck31186782012-07-20 08:09:58 +00002081
2082 while (api[idx] != ixgbe_mbox_api_unknown) {
Alexander Duyck2f8214f2016-04-22 13:18:26 -04002083 err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
Alexander Duyck31186782012-07-20 08:09:58 +00002084 if (!err)
2085 break;
2086 idx++;
2087 }
2088
John Fastabend55fdd45b2012-10-01 14:52:20 +00002089 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck31186782012-07-20 08:09:58 +00002090}
2091
Greg Rose795180d2012-04-17 04:29:34 +00002092static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
Greg Rose92915f72010-01-09 02:24:10 +00002093{
2094 struct net_device *netdev = adapter->netdev;
2095 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose92915f72010-01-09 02:24:10 +00002096
2097 ixgbevf_configure_msix(adapter);
2098
John Fastabend55fdd45b2012-10-01 14:52:20 +00002099 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00002100
Greg Rose92fe0bf2012-11-02 05:50:47 +00002101 if (is_valid_ether_addr(hw->mac.addr))
2102 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2103 else
2104 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
Greg Rose92915f72010-01-09 02:24:10 +00002105
John Fastabend55fdd45b2012-10-01 14:52:20 +00002106 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00002107
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002108 smp_mb__before_atomic();
Greg Rose92915f72010-01-09 02:24:10 +00002109 clear_bit(__IXGBEVF_DOWN, &adapter->state);
2110 ixgbevf_napi_enable_all(adapter);
2111
Emil Tantilovd9bdb572015-01-28 03:21:18 +00002112 /* clear any pending interrupts, may auto mask */
2113 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2114 ixgbevf_irq_enable(adapter);
2115
Greg Rose92915f72010-01-09 02:24:10 +00002116 /* enable transmits */
2117 netif_tx_start_all_queues(netdev);
2118
Greg Rose33bd9f62010-03-19 02:59:52 +00002119 ixgbevf_save_reset_stats(adapter);
2120 ixgbevf_init_last_counter_stats(adapter);
2121
Alexander Duyck4b2cd272012-08-02 01:16:59 +00002122 hw->mac.get_link_status = 1;
Emil Tantilov9ac5c5c2015-01-28 03:21:34 +00002123 mod_timer(&adapter->service_timer, jiffies);
Greg Rose92915f72010-01-09 02:24:10 +00002124}
2125
Greg Rose795180d2012-04-17 04:29:34 +00002126void ixgbevf_up(struct ixgbevf_adapter *adapter)
Greg Rose92915f72010-01-09 02:24:10 +00002127{
Greg Rose92915f72010-01-09 02:24:10 +00002128 ixgbevf_configure(adapter);
2129
Greg Rose795180d2012-04-17 04:29:34 +00002130 ixgbevf_up_complete(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00002131}
2132
2133/**
2134 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
Greg Rose92915f72010-01-09 02:24:10 +00002135 * @rx_ring: ring to free buffers from
2136 **/
Emil Tantilov05d063a2014-01-17 18:29:59 -08002137static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
Greg Rose92915f72010-01-09 02:24:10 +00002138{
Emil Tantilovbad17232014-11-21 02:57:15 +00002139 struct device *dev = rx_ring->dev;
Greg Rose92915f72010-01-09 02:24:10 +00002140 unsigned long size;
2141 unsigned int i;
2142
Emil Tantilovbad17232014-11-21 02:57:15 +00002143 /* Free Rx ring sk_buff */
2144 if (rx_ring->skb) {
2145 dev_kfree_skb(rx_ring->skb);
2146 rx_ring->skb = NULL;
2147 }
2148
2149 /* ring already cleared, nothing to do */
Greg Rosec0456c22010-01-22 22:47:18 +00002150 if (!rx_ring->rx_buffer_info)
2151 return;
Greg Rose92915f72010-01-09 02:24:10 +00002152
Emil Tantilovbad17232014-11-21 02:57:15 +00002153 /* Free all the Rx ring pages */
Greg Rose92915f72010-01-09 02:24:10 +00002154 for (i = 0; i < rx_ring->count; i++) {
Emil Tantilovbad17232014-11-21 02:57:15 +00002155 struct ixgbevf_rx_buffer *rx_buffer;
Greg Rose92915f72010-01-09 02:24:10 +00002156
Emil Tantilovbad17232014-11-21 02:57:15 +00002157 rx_buffer = &rx_ring->rx_buffer_info[i];
2158 if (rx_buffer->dma)
2159 dma_unmap_page(dev, rx_buffer->dma,
2160 PAGE_SIZE, DMA_FROM_DEVICE);
2161 rx_buffer->dma = 0;
2162 if (rx_buffer->page)
2163 __free_page(rx_buffer->page);
2164 rx_buffer->page = NULL;
Greg Rose92915f72010-01-09 02:24:10 +00002165 }
2166
2167 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2168 memset(rx_ring->rx_buffer_info, 0, size);
2169
2170 /* Zero out the descriptor ring */
2171 memset(rx_ring->desc, 0, rx_ring->size);
Greg Rose92915f72010-01-09 02:24:10 +00002172}
2173
2174/**
2175 * ixgbevf_clean_tx_ring - Free Tx Buffers
Greg Rose92915f72010-01-09 02:24:10 +00002176 * @tx_ring: ring to be cleaned
2177 **/
Emil Tantilov05d063a2014-01-17 18:29:59 -08002178static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
Greg Rose92915f72010-01-09 02:24:10 +00002179{
2180 struct ixgbevf_tx_buffer *tx_buffer_info;
2181 unsigned long size;
2182 unsigned int i;
2183
Greg Rosec0456c22010-01-22 22:47:18 +00002184 if (!tx_ring->tx_buffer_info)
2185 return;
2186
Greg Rose92915f72010-01-09 02:24:10 +00002187 /* Free all the Tx ring sk_buffs */
Greg Rose92915f72010-01-09 02:24:10 +00002188 for (i = 0; i < tx_ring->count; i++) {
2189 tx_buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck70a10e22012-05-11 08:33:21 +00002190 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
Greg Rose92915f72010-01-09 02:24:10 +00002191 }
2192
2193 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2194 memset(tx_ring->tx_buffer_info, 0, size);
2195
2196 memset(tx_ring->desc, 0, tx_ring->size);
Greg Rose92915f72010-01-09 02:24:10 +00002197}
2198
2199/**
2200 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2201 * @adapter: board private structure
2202 **/
2203static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2204{
2205 int i;
2206
2207 for (i = 0; i < adapter->num_rx_queues; i++)
Emil Tantilov05d063a2014-01-17 18:29:59 -08002208 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
Greg Rose92915f72010-01-09 02:24:10 +00002209}
2210
2211/**
2212 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2213 * @adapter: board private structure
2214 **/
2215static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2216{
2217 int i;
2218
2219 for (i = 0; i < adapter->num_tx_queues; i++)
Emil Tantilov05d063a2014-01-17 18:29:59 -08002220 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
Greg Rose92915f72010-01-09 02:24:10 +00002221}
2222
2223void ixgbevf_down(struct ixgbevf_adapter *adapter)
2224{
2225 struct net_device *netdev = adapter->netdev;
2226 struct ixgbe_hw *hw = &adapter->hw;
Don Skidmorede02dec2014-01-16 02:30:09 -08002227 int i;
Greg Rose92915f72010-01-09 02:24:10 +00002228
2229 /* signal that we are down to the interrupt handler */
Mark Rustad5b346dc2014-03-04 03:02:18 +00002230 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2231 return; /* do nothing if already down */
Don Skidmore858c3dd2013-10-01 04:33:50 -07002232
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00002233 /* disable all enabled Rx queues */
Don Skidmore858c3dd2013-10-01 04:33:50 -07002234 for (i = 0; i < adapter->num_rx_queues; i++)
Don Skidmore87e70ab2014-01-16 02:30:08 -08002235 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
Greg Rose92915f72010-01-09 02:24:10 +00002236
Emil Tantilovd9bdb572015-01-28 03:21:18 +00002237 usleep_range(10000, 20000);
Greg Rose92915f72010-01-09 02:24:10 +00002238
2239 netif_tx_stop_all_queues(netdev);
2240
Emil Tantilovd9bdb572015-01-28 03:21:18 +00002241 /* call carrier off first to avoid false dev_watchdog timeouts */
2242 netif_carrier_off(netdev);
2243 netif_tx_disable(netdev);
2244
Greg Rose92915f72010-01-09 02:24:10 +00002245 ixgbevf_irq_disable(adapter);
2246
2247 ixgbevf_napi_disable_all(adapter);
2248
Emil Tantilov9ac5c5c2015-01-28 03:21:34 +00002249 del_timer_sync(&adapter->service_timer);
Greg Rose92915f72010-01-09 02:24:10 +00002250
2251 /* disable transmits in the hardware now that interrupts are off */
2252 for (i = 0; i < adapter->num_tx_queues; i++) {
Don Skidmorede02dec2014-01-16 02:30:09 -08002253 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2254
2255 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2256 IXGBE_TXDCTL_SWFLSH);
Greg Rose92915f72010-01-09 02:24:10 +00002257 }
2258
Greg Rose92915f72010-01-09 02:24:10 +00002259 if (!pci_channel_offline(adapter->pdev))
2260 ixgbevf_reset(adapter);
2261
2262 ixgbevf_clean_all_tx_rings(adapter);
2263 ixgbevf_clean_all_rx_rings(adapter);
2264}
2265
2266void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2267{
2268 WARN_ON(in_interrupt());
Greg Rosec0456c22010-01-22 22:47:18 +00002269
Greg Rose92915f72010-01-09 02:24:10 +00002270 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2271 msleep(1);
2272
Alexander Duyck4b2cd272012-08-02 01:16:59 +00002273 ixgbevf_down(adapter);
2274 ixgbevf_up(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00002275
2276 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2277}
2278
2279void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2280{
2281 struct ixgbe_hw *hw = &adapter->hw;
2282 struct net_device *netdev = adapter->netdev;
2283
Don Skidmore798e3812013-10-01 04:33:51 -07002284 if (hw->mac.ops.reset_hw(hw)) {
Greg Rose92915f72010-01-09 02:24:10 +00002285 hw_dbg(hw, "PF still resetting\n");
Don Skidmore798e3812013-10-01 04:33:51 -07002286 } else {
Greg Rose92915f72010-01-09 02:24:10 +00002287 hw->mac.ops.init_hw(hw);
Don Skidmore798e3812013-10-01 04:33:51 -07002288 ixgbevf_negotiate_api(adapter);
2289 }
Greg Rose92915f72010-01-09 02:24:10 +00002290
2291 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
Emil Tantilov91a76ba2015-10-12 10:55:51 -07002292 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
2293 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
Greg Rose92915f72010-01-09 02:24:10 +00002294 }
Emil Tantilove66c92a2015-01-28 03:21:29 +00002295
2296 adapter->last_reset = jiffies;
Greg Rose92915f72010-01-09 02:24:10 +00002297}
2298
Jakub Kicinskie45dd5f2012-11-13 04:03:16 +00002299static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2300 int vectors)
Greg Rose92915f72010-01-09 02:24:10 +00002301{
Emil Tantilova5f93372012-11-13 04:03:17 +00002302 int vector_threshold;
Greg Rose92915f72010-01-09 02:24:10 +00002303
Alexander Duyckfa71ae22012-05-11 08:32:50 +00002304 /* We'll want at least 2 (vector_threshold):
2305 * 1) TxQ[0] + RxQ[0] handler
2306 * 2) Other (Link Status Change, etc.)
Greg Rose92915f72010-01-09 02:24:10 +00002307 */
2308 vector_threshold = MIN_MSIX_COUNT;
2309
2310 /* The more we get, the more we will assign to Tx/Rx Cleanup
 2311	 * for the separate queues, where Rx Cleanup >= Tx Cleanup.
2312 * Right now, we simply care about how many we'll get; we'll
 2313	 * set them up later while requesting IRQs.
2314 */
Alexander Gordeev5c1e35882014-02-18 11:11:46 +01002315 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2316 vector_threshold, vectors);
Greg Rose92915f72010-01-09 02:24:10 +00002317
Alexander Gordeev5c1e35882014-02-18 11:11:46 +01002318 if (vectors < 0) {
Jakub Kicinskie45dd5f2012-11-13 04:03:16 +00002319 dev_err(&adapter->pdev->dev,
2320 "Unable to allocate MSI-X interrupts\n");
Greg Rose92915f72010-01-09 02:24:10 +00002321 kfree(adapter->msix_entries);
2322 adapter->msix_entries = NULL;
Alexander Gordeev5c1e35882014-02-18 11:11:46 +01002323 return vectors;
Greg Rose92915f72010-01-09 02:24:10 +00002324 }
Greg Rosedee847f2012-11-02 05:50:57 +00002325
Alexander Gordeev5c1e35882014-02-18 11:11:46 +01002326 /* Adjust for only the vectors we'll use, which is minimum
2327 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2328 * vectors we were allocated.
2329 */
2330 adapter->num_msix_vectors = vectors;
2331
2332 return 0;
Greg Rose92915f72010-01-09 02:24:10 +00002333}
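
/* Note (illustrative): pci_enable_msix_range() returns the number of
 * vectors actually enabled when it can grant anything in
 * [vector_threshold, vectors], or a negative errno when even the
 * two-vector minimum is unavailable - hence a positive return is kept
 * in num_msix_vectors while a negative one frees msix_entries above.
 */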
2334
Ben Hutchings49ce9c22012-07-10 10:56:00 +00002335/**
2336 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
Greg Rose92915f72010-01-09 02:24:10 +00002337 * @adapter: board private structure to initialize
2338 *
 2339	 * This is the top-level queue allocation routine. The order here is very
 2340	 * important: start with the largest set of features turned on at once and
 2341	 * end with the smallest. This way large combinations can be allocated if
 2342	 * they're turned on, and smaller combinations serve as the fallthrough
 2343	 * conditions.
2344 *
2345 **/
2346static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2347{
Don Skidmore220fe052013-09-21 01:40:49 +00002348 struct ixgbe_hw *hw = &adapter->hw;
2349 unsigned int def_q = 0;
2350 unsigned int num_tcs = 0;
2351 int err;
2352
Greg Rose92915f72010-01-09 02:24:10 +00002353 /* Start with base case */
2354 adapter->num_rx_queues = 1;
2355 adapter->num_tx_queues = 1;
Don Skidmore220fe052013-09-21 01:40:49 +00002356
2357 spin_lock_bh(&adapter->mbx_lock);
2358
2359 /* fetch queue configuration from the PF */
2360 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2361
2362 spin_unlock_bh(&adapter->mbx_lock);
2363
2364 if (err)
2365 return;
2366
2367 /* we need as many queues as traffic classes */
Emil Tantilov2dc571a2014-12-06 09:19:02 +00002368 if (num_tcs > 1) {
Don Skidmore220fe052013-09-21 01:40:49 +00002369 adapter->num_rx_queues = num_tcs;
Emil Tantilov2dc571a2014-12-06 09:19:02 +00002370 } else {
2371 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2372
2373 switch (hw->api_version) {
2374 case ixgbe_mbox_api_11:
Vlad Zolotarov94cf66f2015-03-30 21:35:26 +03002375 case ixgbe_mbox_api_12:
Emil Tantilov2dc571a2014-12-06 09:19:02 +00002376 adapter->num_rx_queues = rss;
2377 adapter->num_tx_queues = rss;
2378 default:
2379 break;
2380 }
2381 }
Greg Rose92915f72010-01-09 02:24:10 +00002382}
2383
2384/**
2385 * ixgbevf_alloc_queues - Allocate memory for all rings
2386 * @adapter: board private structure to initialize
2387 *
 2388	 * We allocate one ring per queue at run-time since we don't know the
 2389	 * number of queues at compile-time.
2391 **/
2392static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
2393{
Don Skidmore87e70ab2014-01-16 02:30:08 -08002394 struct ixgbevf_ring *ring;
2395 int rx = 0, tx = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002396
Don Skidmore87e70ab2014-01-16 02:30:08 -08002397 for (; tx < adapter->num_tx_queues; tx++) {
2398 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2399 if (!ring)
2400 goto err_allocation;
Greg Rose92915f72010-01-09 02:24:10 +00002401
Don Skidmore87e70ab2014-01-16 02:30:08 -08002402 ring->dev = &adapter->pdev->dev;
2403 ring->netdev = adapter->netdev;
2404 ring->count = adapter->tx_ring_count;
2405 ring->queue_index = tx;
2406 ring->reg_idx = tx;
Greg Rose92915f72010-01-09 02:24:10 +00002407
Don Skidmore87e70ab2014-01-16 02:30:08 -08002408 adapter->tx_ring[tx] = ring;
Greg Rose92915f72010-01-09 02:24:10 +00002409 }
2410
Don Skidmore87e70ab2014-01-16 02:30:08 -08002411 for (; rx < adapter->num_rx_queues; rx++) {
2412 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2413 if (!ring)
2414 goto err_allocation;
2415
2416 ring->dev = &adapter->pdev->dev;
2417 ring->netdev = adapter->netdev;
2418
2419 ring->count = adapter->rx_ring_count;
2420 ring->queue_index = rx;
2421 ring->reg_idx = rx;
2422
2423 adapter->rx_ring[rx] = ring;
Greg Rose92915f72010-01-09 02:24:10 +00002424 }
2425
2426 return 0;
2427
Don Skidmore87e70ab2014-01-16 02:30:08 -08002428err_allocation:
2429 while (tx) {
2430 kfree(adapter->tx_ring[--tx]);
2431 adapter->tx_ring[tx] = NULL;
2432 }
2433
2434 while (rx) {
2435 kfree(adapter->rx_ring[--rx]);
2436 adapter->rx_ring[rx] = NULL;
2437 }
Greg Rose92915f72010-01-09 02:24:10 +00002438 return -ENOMEM;
2439}
2440
2441/**
2442 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2443 * @adapter: board private structure to initialize
2444 *
2445 * Attempt to configure the interrupts using the best available
2446 * capabilities of the hardware and the kernel.
2447 **/
2448static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2449{
Greg Rose91e2b892012-10-03 00:57:23 +00002450 struct net_device *netdev = adapter->netdev;
Mark Rustad50985b52015-10-21 17:21:20 -07002451 int err;
Greg Rose92915f72010-01-09 02:24:10 +00002452 int vector, v_budget;
2453
Jeff Kirsherdec0d8e2015-02-10 11:42:33 +00002454 /* It's easy to be greedy for MSI-X vectors, but it really
Greg Rose92915f72010-01-09 02:24:10 +00002455 * doesn't do us much good if we have a lot more vectors
 2456	 * than CPUs. So let's be conservative and only ask for
Alexander Duyckfa71ae22012-05-11 08:32:50 +00002457	 * (roughly) the same number of vectors as there are CPUs.
2458 * The default is to use pairs of vectors.
Greg Rose92915f72010-01-09 02:24:10 +00002459 */
Alexander Duyckfa71ae22012-05-11 08:32:50 +00002460 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2461 v_budget = min_t(int, v_budget, num_online_cpus());
2462 v_budget += NON_Q_VECTORS;
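
	/* Note (illustrative): v_budget works out to
	 * min(max(num_rx_queues, num_tx_queues), num_online_cpus()) plus
	 * NON_Q_VECTORS (1 here) for the mailbox/other vector - e.g. two
	 * Tx/Rx queue pairs on a 4-CPU guest request 2 + 1 = 3 entries.
	 */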

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter.
	 */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
	if (err)
		return err;

	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		return err;

	return netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
}

/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       ixgbevf_poll, 64);
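		/* the weight of 64 passed above matches the kernel's
		 * default NAPI weight (NAPI_POLL_WEIGHT): each poll
		 * processes at most 64 packets before yielding the CPU
		 */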
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}

/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}

/**
 * ixgbevf_reset_interrupt_capability - Reset MSI-X setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * ixgbevf_init_interrupt_scheme - Determine if MSI-X is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		pr_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" :
	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbevf_free_q_vectors(adapter);
	ixgbevf_reset_interrupt_capability(adapter);
}

/**
 * ixgbevf_sw_init - Initialize general software structures
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int err;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);

	/* assume legacy case in which PF would only give VF 2 queues */
	hw->mac.max_tx_queues = 2;
	hw->mac.max_rx_queues = 2;

	/* lock to protect mailbox accesses */
	spin_lock_init(&adapter->mbx_lock);

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state. Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_hw failed: %d\n", err);
			goto out;
		}
		ixgbevf_negotiate_api(adapter);
		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		ether_addr_copy(netdev->dev_addr, hw->mac.addr);
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		ether_addr_copy(hw->mac.addr, netdev->dev_addr);
		ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr);
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;

out:
	return err;
}

#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
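
/* Worked example for the 36-bit case: if the hardware counter wrapped from
 * 0xF_FFFF_FFF0 to 0x10 between two polls, current_counter < last_counter,
 * so 2^36 (0x10_0000_0000) is added to the running 64-bit software counter
 * before its low 36 bits are replaced with the fresh hardware value.
 */
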
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->hw_csum_rx_error +=
			adapter->rx_ring[i]->hw_csum_rx_error;
		adapter->rx_ring[i]->hw_csum_rx_error = 0;
	}
}

/**
 * ixgbevf_service_timer - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_service_timer(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;

	/* Reset the timer */
	mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);

	ixgbevf_service_event_schedule(adapter);
}

static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
{
	if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
		return;

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	rtnl_lock();
	ixgbevf_reinit_locked(adapter);
	rtnl_unlock();
}

/**
 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes. First it strobes the interrupt lines
 * in order to make certain interrupts are occurring. Secondly it sets the
 * bits needed to check for Tx hangs. As a result we should immediately
 * determine if a hang has occurred.
 **/
static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Force detection of hung controller */
	if (netif_carrier_ok(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_check_for_tx_hang(adapter->tx_ring[i]);
	}

	/* get one bit for every active Tx/Rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];

		if (qv->rx.ring || qv->tx.ring)
			eics |= BIT(i);
	}

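	/* eics now has one bit set per vector that owns at least one ring;
	 * e.g. with the default two queue pairs on vectors 0 and 1, eics is
	 * 0x3 and the write below strobes a software interrupt on both.
	 */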
	/* Cause software interrupt to ensure rings are cleaned */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
}

/**
 * ixgbevf_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	s32 err;

	spin_lock_bh(&adapter->mbx_lock);

	err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	/* if check for link returns error we will need to reset */
	if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
		set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
		link_up = false;
	}

	adapter->link_up = link_up;
	adapter->link_speed = link_speed;
}

/**
 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
 *				 print link up message
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* only continue if link was previously down */
	if (netif_carrier_ok(netdev))
		return;

	dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
		 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
		 "10 Gbps" :
		 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
		 "1 Gbps" :
		 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
		 "100 Mbps" :
		 "unknown speed");

	netif_carrier_on(netdev);
}

/**
 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
 *				   print link down message
 * @adapter: pointer to the adapter structure
 **/
static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->link_speed = 0;

	/* only continue if link was up previously */
	if (!netif_carrier_ok(netdev))
		return;

	dev_info(&adapter->pdev->dev, "NIC Link is Down\n");

	netif_carrier_off(netdev);
}

/**
 * ixgbevf_watchdog_subtask - worker thread to bring link up
 * @adapter: board private structure
 **/
static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
{
	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	ixgbevf_watchdog_update_link(adapter);

	if (adapter->link_up)
		ixgbevf_watchdog_link_is_up(adapter);
	else
		ixgbevf_watchdog_link_is_down(adapter);

	ixgbevf_update_stats(adapter);
}

/**
 * ixgbevf_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_service_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       service_task);
	struct ixgbe_hw *hw = &adapter->hw;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			rtnl_lock();
			ixgbevf_down(adapter);
			rtnl_unlock();
		}
		return;
	}

	ixgbevf_queue_reset_subtask(adapter);
	ixgbevf_reset_subtask(adapter);
	ixgbevf_watchdog_subtask(adapter);
	ixgbevf_check_hang_subtask(adapter);

	ixgbevf_service_event_complete(adapter);
}

/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
	ixgbevf_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

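	/* Worked example, assuming the default IXGBEVF_DEFAULT_TXD of 1024
	 * entries: 1024 * 16 bytes per advanced descriptor = 16384 bytes,
	 * already a 4K multiple, so ALIGN() leaves the size unchanged.
	 */
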
	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
		break;
	}
	return err;
}

/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
	ixgbevf_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* A previous failure to open the device because of a lack of
	 * available MSI-X vector resources may have reset the number
	 * of MSI-X vectors variable to zero. The only way to recover
	 * is to unload/reload the driver and hope that the system has
	 * been able to recover some MSI-X vector resources.
	 */
	if (!adapter->num_msix_vectors)
		return -ENOMEM;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the VF can't start.
		 */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
			goto err_setup_reset;
		}
	}

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/* Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_up_complete(adapter);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_reset(adapter);

err_setup_reset:

	return err;
}

/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (!netif_device_present(netdev))
		return 0;

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}

static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;

	if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
				&adapter->state))
		return;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	rtnl_lock();

	if (netif_running(dev))
		ixgbevf_close(dev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_init_interrupt_scheme(adapter);

	if (netif_running(dev))
		ixgbevf_open(dev);

	rtnl_unlock();
}

static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,
				u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct ixgbevf_tx_buffer *first,
		       u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM |
				   IXGBE_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* compute length of segmentation header */
	*hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
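
	/* Worked example for a plain TCP/IPv4 frame with a 20-byte IP header
	 * and a 32-byte TCP header: l4_offset = 14 + 20 = 34 and *hdr_len =
	 * 32 + 34 = 66, so the L4LEN field carries 32, the MSS field carries
	 * gso_size, and context index 1 is selected for the TSO context.
	 */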

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}

static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
{
	unsigned int offset = 0;

	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);

	return offset == skb_checksum_start_offset(skb);
}

static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct ixgbevf_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto no_csum;

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
		/* fall through */
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (((first->protocol == htons(ETH_P_IP)) &&
		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
		    ((first->protocol == htons(ETH_P_IPV6)) &&
		     ixgbevf_ipv6_csum_is_sctp(skb))) {
			type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		/* fall through */
	default:
		skb_checksum_help(skb);
		goto no_csum;
	}
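
	/* The switch above infers the L4 protocol from where the checksum
	 * field sits in its header: offsetof(struct tcphdr, check) is 16,
	 * offsetof(struct udphdr, check) is 6, and anything the hardware
	 * cannot offload was checksummed in software by skb_checksum_help()
	 * and jumped straight to no_csum.
	 */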
	/* update TX checksum flag */
	first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
}

static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
				      IXGBE_ADVTXD_DCMD_IFCS |
				      IXGBE_ADVTXD_DCMD_DEXT);

	/* set HW VLAN bit if VLAN is present */
	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);

	return cmd_type;
}

static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
				     u32 tx_flags, unsigned int paylen)
{
	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);

	/* enable L4 checksum for TSO and TX checksum offload */
	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);

	/* use index 1 context for TSO/FSO/FCOE */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);

	/* Check Context must be set if Tx switch is enabled, which it
	 * always is for the case where virtual functions are running
	 */
	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);

	tx_desc->read.olinfo_status = olinfo_status;
}

static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			   struct ixgbevf_tx_buffer *first,
			   const u8 hdr_len)
{
	dma_addr_t dma;
	struct sk_buff *skb = first->skb;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	u32 tx_flags = first->tx_flags;
	__le32 cmd_type;
	u16 i = tx_ring->next_to_use;

	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);

	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
	cmd_type = ixgbevf_tx_cmd_type(tx_flags);

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	dma_unmap_len_set(first, len, size);
	dma_unmap_addr_set(first, dma, dma);

	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
			tx_desc->read.olinfo_status = 0;
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer = &tx_ring->tx_buffer_info[i];
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);
		tx_desc->read.olinfo_status = 0;

		frag++;
	}

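	/* Example walk through the loop above, assuming the usual 16K
	 * IXGBE_MAX_DATA_PER_TXD: a TSO skb with a 66-byte linear header
	 * and two 32K fragments consumes one descriptor for the header and
	 * two descriptors per fragment, each split at the 16K limit.
	 */
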
	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch. (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier (wmb) to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* notify HW of packet */
	ixgbevf_write_tail(tx_ring, i);

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 * smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available.
	 */
	if (likely(ixgbevf_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}

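/* The stop/recheck/restart sequence above closes a classic race: after the
 * transmit path sees the ring as full but before it stops the queue, the Tx
 * cleanup path may free descriptors and try to wake a queue it still saw as
 * running. The smp_mb() is presumably paired with a matching barrier in the
 * cleanup path, so one side is guaranteed to observe the other's update and
 * the queue cannot stall stopped while the ring holds free slots.
 */
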
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_tx_buffer *first;
	struct ixgbevf_ring *tx_ring;
	int tso;
	u32 tx_flags = 0;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 hdr_len = 0;
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);

	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = adapter->tx_ring[skb->queue_mapping];

	/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 * + 2 desc gap to keep tail from touching head,
	 * + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
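	/* e.g. on a 4K-page system (pages smaller than the per-descriptor
	 * data limit) an skb with a linear head and 3 page fragments needs
	 * count = 1 + 3 = 4; the check below reserves count + 3 slots to
	 * cover the context descriptor and the head/tail gap.
	 */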
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = vlan_get_protocol(skb);

	tso = ixgbevf_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbevf_tx_csum(tx_ring, first);

	ixgbevf_tx_map(tx_ring, first, hdr_len);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	spin_lock_bh(&adapter->mbx_lock);

	err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return -EPERM;

	ether_addr_copy(hw->mac.addr, addr->sa_data);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	return 0;
}

/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int ret;

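	/* e.g. new_mtu = 1500 gives max_frame = 1500 + 14 (Ethernet header)
	 * + 4 (FCS) = 1518 bytes on the wire
	 */
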
        spin_lock_bh(&adapter->mbx_lock);
        /* notify the PF of our intent to use this size of frame */
        ret = hw->mac.ops.set_rlpml(hw, max_frame);
        spin_unlock_bh(&adapter->mbx_lock);
        if (ret)
                return -EINVAL;

        hw_dbg(hw, "changing MTU from %d to %d\n",
               netdev->mtu, new_mtu);

        /* must set new MTU before calling down or up */
        netdev->mtu = new_mtu;

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbevf_netpoll(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        int i;

        /* if interface is down do nothing */
        if (test_bit(__IXGBEVF_DOWN, &adapter->state))
                return;
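        /* The IRQ argument is unused by ixgbevf_msix_clean_rings(), so 0
         * is passed while each queue vector is polled directly.
         */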
        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
        int retval = 0;
#endif

        rtnl_lock();
        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                ixgbevf_down(adapter);
                ixgbevf_free_irq(adapter);
                ixgbevf_free_all_tx_resources(adapter);
                ixgbevf_free_all_rx_resources(adapter);
                ixgbevf_clear_interrupt_scheme(adapter);
        }
        rtnl_unlock();

#ifdef CONFIG_PM
        retval = pci_save_state(pdev);
        if (retval)
                return retval;

#endif
        if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
                pci_disable_device(pdev);

        return 0;
}

#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        u32 err;

        pci_restore_state(pdev);
        /* pci_restore_state clears dev->state_saved so call
         * pci_save_state to restore it.
         */
        pci_save_state(pdev);

        err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
                return err;
        }
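        /* clear_bit() alone does not order earlier stores; the explicit
         * barrier makes sure the device re-enable above is visible before
         * other CPUs can observe __IXGBEVF_DISABLED as cleared.
         */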
        smp_mb__before_atomic();
        clear_bit(__IXGBEVF_DISABLED, &adapter->state);
        pci_set_master(pdev);

        ixgbevf_reset(adapter);

        rtnl_lock();
        err = ixgbevf_init_interrupt_scheme(adapter);
        rtnl_unlock();
        if (err) {
                dev_err(&pdev->dev, "Cannot initialize interrupts\n");
                return err;
        }

        if (netif_running(netdev)) {
                err = ixgbevf_open(netdev);
                if (err)
                        return err;
        }

        netif_device_attach(netdev);

        return err;
}

#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
        ixgbevf_suspend(pdev, PMSG_SUSPEND);
}

static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
                                                   struct rtnl_link_stats64 *stats)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        unsigned int start;
        u64 bytes, packets;
        const struct ixgbevf_ring *ring;
        int i;

        ixgbevf_update_stats(adapter);

        stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

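        /* Per-ring counters sit under a u64_stats seqcount: re-read if a
         * writer raced with us so the bytes/packets pair stays consistent,
         * which matters on 32-bit kernels where 64-bit loads can tear.
         */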
        for (i = 0; i < adapter->num_rx_queues; i++) {
                ring = adapter->rx_ring[i];
                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        bytes = ring->stats.bytes;
                        packets = ring->stats.packets;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                stats->rx_bytes += bytes;
                stats->rx_packets += packets;
        }

        for (i = 0; i < adapter->num_tx_queues; i++) {
                ring = adapter->tx_ring[i];
                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        bytes = ring->stats.bytes;
                        packets = ring->stats.packets;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                stats->tx_bytes += bytes;
                stats->tx_packets += packets;
        }

        return stats;
}

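/* The Tx context descriptor encodes header lengths in fixed-width fields,
 * which bounds the MAC and network headers the hardware can describe for
 * offloads; longer headers force a fallback to software in
 * ixgbevf_features_check() below.
 */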
#define IXGBEVF_MAX_MAC_HDR_LEN         127
#define IXGBEVF_MAX_NETWORK_HDR_LEN     511

static netdev_features_t
ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
                       netdev_features_t features)
{
        unsigned int network_hdr_len, mac_hdr_len;

        /* Make certain the headers can be described by a context descriptor */
        mac_hdr_len = skb_network_header(skb) - skb->data;
        if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
                return features & ~(NETIF_F_HW_CSUM |
                                    NETIF_F_SCTP_CRC |
                                    NETIF_F_HW_VLAN_CTAG_TX |
                                    NETIF_F_TSO |
                                    NETIF_F_TSO6);

        network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
        if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
                return features & ~(NETIF_F_HW_CSUM |
                                    NETIF_F_SCTP_CRC |
                                    NETIF_F_TSO |
                                    NETIF_F_TSO6);

        /* We can only support IPV4 TSO in tunnels if we can mangle the
         * inner IP ID field, so strip TSO if MANGLEID is not supported.
         */
        if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
                features &= ~NETIF_F_TSO;

        return features;
}

static const struct net_device_ops ixgbevf_netdev_ops = {
        .ndo_open               = ixgbevf_open,
        .ndo_stop               = ixgbevf_close,
        .ndo_start_xmit         = ixgbevf_xmit_frame,
        .ndo_set_rx_mode        = ixgbevf_set_rx_mode,
        .ndo_get_stats64        = ixgbevf_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = ixgbevf_set_mac,
        .ndo_change_mtu         = ixgbevf_change_mtu,
        .ndo_tx_timeout         = ixgbevf_tx_timeout,
        .ndo_vlan_rx_add_vid    = ixgbevf_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgbevf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_RX_BUSY_POLL
        .ndo_busy_poll          = ixgbevf_busy_poll_recv,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ixgbevf_netpoll,
#endif
        .ndo_features_check     = ixgbevf_features_check,
};

static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
        dev->netdev_ops = &ixgbevf_netdev_ops;
        ixgbevf_set_ethtool_ops(dev);
        dev->watchdog_timeo = 5 * HZ;
}

/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct ixgbevf_adapter *adapter = NULL;
        struct ixgbe_hw *hw = NULL;
        const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
        int err, pci_using_dac;
        bool disable_dev = false;

        err = pci_enable_device(pdev);
        if (err)
                return err;

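        /* Prefer a 64-bit DMA mask and fall back to 32-bit if the platform
         * cannot honour it; pci_using_dac records the outcome so that
         * NETIF_F_HIGHDMA is only advertised when 64-bit DMA is usable.
         */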
        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
                        goto err_dma;
                }
                pci_using_dac = 0;
        }

        err = pci_request_regions(pdev, ixgbevf_driver_name);
        if (err) {
                dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
                goto err_pci_reg;
        }

        pci_set_master(pdev);

        netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
                                   MAX_TX_QUEUES);
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }

        SET_NETDEV_DEV(netdev, &pdev->dev);

        adapter = netdev_priv(netdev);

        adapter->netdev = netdev;
        adapter->pdev = pdev;
        hw = &adapter->hw;
        hw->back = adapter;
        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

        /* call save state here in standalone driver because it relies on
         * adapter struct to exist, and needs to call netdev_priv
         */
        pci_save_state(pdev);

        hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
                              pci_resource_len(pdev, 0));
        adapter->io_addr = hw->hw_addr;
        if (!hw->hw_addr) {
                err = -EIO;
                goto err_ioremap;
        }

        ixgbevf_assign_netdev_ops(netdev);

        /* Setup HW API */
        memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
        hw->mac.type = ii->mac;

        memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
               sizeof(struct ixgbe_mbx_operations));

        /* setup the private structure */
        err = ixgbevf_sw_init(adapter);
        if (err)
                goto err_sw_init;

        /* The HW MAC address was set and/or determined in sw_init */
        if (!is_valid_ether_addr(netdev->dev_addr)) {
                pr_err("invalid MAC address\n");
                err = -EIO;
                goto err_sw_init;
        }

        netdev->hw_features = NETIF_F_SG |
                              NETIF_F_TSO |
                              NETIF_F_TSO6 |
                              NETIF_F_RXCSUM |
                              NETIF_F_HW_CSUM |
                              NETIF_F_SCTP_CRC;

#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
                                      NETIF_F_GSO_GRE_CSUM | \
                                      NETIF_F_GSO_IPXIP4 | \
                                      NETIF_F_GSO_IPXIP6 | \
                                      NETIF_F_GSO_UDP_TUNNEL | \
                                      NETIF_F_GSO_UDP_TUNNEL_CSUM)

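        /* With NETIF_F_GSO_PARTIAL the stack pre-computes every header it
         * segments except the ones the device claims above, so the tunnel
         * offloads in IXGBEVF_GSO_PARTIAL_FEATURES can be used even though
         * the hardware only parses the inner headers.
         */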
        netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
        netdev->hw_features |= NETIF_F_GSO_PARTIAL |
                               IXGBEVF_GSO_PARTIAL_FEATURES;

        netdev->features = netdev->hw_features;

        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;

        netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
        netdev->mpls_features |= NETIF_F_HW_CSUM;
        netdev->hw_enc_features |= netdev->vlan_features;

        /* set this bit last since it cannot be part of vlan_features */
        netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
                            NETIF_F_HW_VLAN_CTAG_RX |
                            NETIF_F_HW_VLAN_CTAG_TX;

        netdev->priv_flags |= IFF_UNICAST_FLT;

        /* MTU range: 68 - 1504 or 9710 */
        netdev->min_mtu = ETH_MIN_MTU;
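        /* Jumbo frames need mailbox API 1.1 or later; an 82599 VF stuck on
         * the legacy API is capped at the standard 1500-byte payload.
         */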
        switch (adapter->hw.api_version) {
        case ixgbe_mbox_api_11:
        case ixgbe_mbox_api_12:
                netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
                                  (ETH_HLEN + ETH_FCS_LEN);
                break;
        default:
                if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
                        netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
                                          (ETH_HLEN + ETH_FCS_LEN);
                else
                        netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
                break;
        }

        if (IXGBE_REMOVED(hw->hw_addr)) {
                err = -EIO;
                goto err_sw_init;
        }

        setup_timer(&adapter->service_timer, &ixgbevf_service_timer,
                    (unsigned long)adapter);

        INIT_WORK(&adapter->service_task, ixgbevf_service_task);
        set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
        clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);

        err = ixgbevf_init_interrupt_scheme(adapter);
        if (err)
                goto err_sw_init;

        strcpy(netdev->name, "eth%d");

        err = register_netdev(netdev);
        if (err)
                goto err_register;

        pci_set_drvdata(pdev, netdev);
        netif_carrier_off(netdev);

        ixgbevf_init_last_counter_stats(adapter);

        /* print the VF info */
        dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
        dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);

        switch (hw->mac.type) {
        case ixgbe_mac_X550_vf:
                dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
                break;
        case ixgbe_mac_X540_vf:
                dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
                break;
        case ixgbe_mac_82599_vf:
        default:
                dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
                break;
        }

        return 0;

err_register:
        ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
        ixgbevf_reset_interrupt_capability(adapter);
        iounmap(adapter->io_addr);
err_ioremap:
        disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_pci_reg:
err_dma:
        if (!adapter || disable_dev)
                pci_disable_device(pdev);
        return err;
}

/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter;
        bool disable_dev;

        if (!netdev)
                return;

        adapter = netdev_priv(netdev);

        set_bit(__IXGBEVF_REMOVING, &adapter->state);
        cancel_work_sync(&adapter->service_task);

        if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);

        ixgbevf_clear_interrupt_scheme(adapter);
        ixgbevf_reset_interrupt_capability(adapter);

        iounmap(adapter->io_addr);
        pci_release_regions(pdev);

        hw_dbg(&adapter->hw, "Remove complete\n");

        disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
        free_netdev(netdev);

        if (disable_dev)
                pci_disable_device(pdev);
}

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
                                                  pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
                return PCI_ERS_RESULT_DISCONNECT;

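        /* Detach and quiesce under RTNL so error recovery cannot race
         * with a concurrent ifup/ifdown or a second error report.
         */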
        rtnl_lock();
        netif_device_detach(netdev);

        if (state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        if (netif_running(netdev))
                ixgbevf_down(adapter);

        if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
                pci_disable_device(pdev);
        rtnl_unlock();

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        if (pci_enable_device_mem(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }

        smp_mb__before_atomic();
        clear_bit(__IXGBEVF_DISABLED, &adapter->state);
        pci_set_master(pdev);

        ixgbevf_reset(adapter);

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        if (netif_running(netdev))
                ixgbevf_up(adapter);

        netif_device_attach(netdev);
}

/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
        .error_detected = ixgbevf_io_error_detected,
        .slot_reset = ixgbevf_io_slot_reset,
        .resume = ixgbevf_io_resume,
};

static struct pci_driver ixgbevf_driver = {
        .name           = ixgbevf_driver_name,
        .id_table       = ixgbevf_pci_tbl,
        .probe          = ixgbevf_probe,
        .remove         = ixgbevf_remove,
#ifdef CONFIG_PM
        /* Power Management Hooks */
        .suspend        = ixgbevf_suspend,
        .resume         = ixgbevf_resume,
#endif
        .shutdown       = ixgbevf_shutdown,
        .err_handler    = &ixgbevf_err_handler
};

/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
        pr_info("%s - version %s\n", ixgbevf_driver_string,
                ixgbevf_driver_version);

        pr_info("%s\n", ixgbevf_copyright);
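
        /* A dedicated single-threaded (ordered) workqueue keeps the
         * service task serialized across all ixgbevf devices.
         */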
        ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
        if (!ixgbevf_wq) {
                pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name);
                return -ENOMEM;
        }

        return pci_register_driver(&ixgbevf_driver);
}

module_init(ixgbevf_init_module);

/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
        pci_unregister_driver(&ixgbevf_driver);
        if (ixgbevf_wq) {
                destroy_workqueue(ixgbevf_wq);
                ixgbevf_wq = NULL;
        }
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
        struct ixgbevf_adapter *adapter = hw->back;

        return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */