/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40evf.h"
#include "i40e_prototype.h"
#include "i40evf_client.h"
static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
static int i40evf_close(struct net_device *netdev);

char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
	"Intel(R) 40-10 Gigabit Virtual Function Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 1
#define DRV_VERSION_BUILD 7
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD) \
	     DRV_KERN
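/* With the values above, DRV_VERSION expands to the version string "2.1.7-k". */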
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
	"Copyright (c) 2013 - 2015 Intel Corporation.";

/* i40evf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40evf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40evf_wq;

/**
 * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
				      struct i40e_dma_mem *mem,
				      u64 size, u32 alignment)
{
	struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;

	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return I40E_ERR_NO_MEMORY;
}

/**
 * i40evf_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;

	if (!mem || !mem->va)
		return I40E_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
				       struct i40e_virt_mem *mem, u32 size)
{
	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return I40E_ERR_NO_MEMORY;
}

/**
 * i40evf_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
				   struct i40e_virt_mem *mem)
{
	if (!mem)
		return I40E_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}

/**
 * i40evf_debug_d - OS dependent version of debug printing
 * @hw: pointer to the HW structure
 * @mask: debug level mask
 * @fmt_str: printf-type format description
 **/
void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
{
	char buf[512];
	va_list argptr;

	if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
		return;

	va_start(argptr, fmt_str);
	vsnprintf(buf, sizeof(buf), fmt_str, argptr);
	va_end(argptr);

	/* the debug string is already formatted with a newline */
	pr_info("%s", buf);
}

/**
 * i40evf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void i40evf_schedule_reset(struct i40evf_adapter *adapter)
{
	if (!(adapter->flags &
	      (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
		schedule_work(&adapter->reset_task);
	}
}

/**
 * i40evf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void i40evf_tx_timeout(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	i40evf_schedule_reset(adapter);
}

/**
 * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, I40E_VFINT_DYN_CTL01, 0);

	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * i40evf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;

	wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
				       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);

	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);
}

/**
 * i40evf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void i40evf_irq_disable(struct i40evf_adapter *adapter)
{
	int i;
	struct i40e_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);
}

/**
 * i40evf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
{
	struct i40e_hw *hw = &adapter->hw;
	int i;

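	/* Bit 0 of @mask corresponds to MSI-X vector 1, the first traffic
	 * vector; vector 0 is reserved for the admin queue and is handled
	 * separately by i40evf_misc_irq_enable().
	 */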
267 for (i = 1; i < adapter->num_msix_vectors; i++) {
Jesse Brandeburg41a1d042015-06-04 16:24:02 -0400268 if (mask & BIT(i - 1)) {
Greg Rose5eae00c2013-12-21 06:12:45 +0000269 wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
270 I40E_VFINT_DYN_CTLN1_INTENA_MASK |
Jesse Brandeburg97bf75f2015-02-27 09:18:32 +0000271 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
Anjali Singhai Jainb1f33662015-07-10 19:36:05 -0400272 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
Greg Rose5eae00c2013-12-21 06:12:45 +0000273 }
274 }
275}
276
277/**
278 * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
279 * @adapter: board private structure
280 * @mask: bitmap of vectors to trigger
281 **/
Mitch Williams75a64432014-11-11 20:02:42 +0000282static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
Greg Rose5eae00c2013-12-21 06:12:45 +0000283{
284 struct i40e_hw *hw = &adapter->hw;
285 int i;
Mitch Williamsd82acb32015-11-06 15:26:06 -0800286 u32 dyn_ctl;
Greg Rose5eae00c2013-12-21 06:12:45 +0000287
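	/* Vector 0 (admin queue) uses the DYN_CTL01 register; the remaining
	 * traffic vectors use the zero-based DYN_CTLN1 array, hence the
	 * (i - 1) indexing below.
	 */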
	if (mask & 1) {
		dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
		dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
			   I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
			   I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
		wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
	}
	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i)) {
			dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
			dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
				   I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
				   I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
		}
	}
}

/**
 * i40evf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
{
	struct i40e_hw *hw = &adapter->hw;

	i40evf_misc_irq_enable(adapter);
	i40evf_irq_enable_queues(adapter, ~0);

	if (flush)
		rd32(hw, I40E_VFGEN_RSTAT);
}

/**
 * i40evf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t i40evf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40e_hw *hw = &adapter->hw;
	u32 val;

	/* handle non-queue interrupts, these reads clear the registers */
	val = rd32(hw, I40E_VFINT_ICR01);
	val = rd32(hw, I40E_VFINT_ICR0_ENA1);

	val = rd32(hw, I40E_VFINT_DYN_CTL01) |
	      I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
	wr32(hw, I40E_VFINT_DYN_CTL01, val);

	/* schedule work on the private workqueue */
	schedule_work(&adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * i40evf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
{
	struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct i40e_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	q_vector->itr_countdown = ITR_COUNTDOWN_START;
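	/* Traffic vectors start at v_idx 1 (vector 0 services the admin
	 * queue), while the ITRN registers are zero-based, hence v_idx - 1.
	 */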
	wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, v_idx - 1), q_vector->rx.itr);
}

/**
 * i40evf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
{
	struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct i40e_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.latency_range = I40E_LOW_LATENCY;
	q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
	q_vector->itr_countdown = ITR_COUNTDOWN_START;
	q_vector->num_ringpairs++;
	wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, v_idx - 1), q_vector->tx.itr);
}

/**
 * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible. You would add new
 * mapping configurations in here.
 **/
static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_active_queues;
	int txr_remaining = adapter->num_active_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	/* The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors >= (rxr_remaining * 2)) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			i40evf_map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/* If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 * Re-adjusting *qpv takes care of the remainder.
	 */
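	/* Illustrative example: with 4 Rx/Tx queue pairs but only 2 traffic
	 * vectors, vector 0 ends up servicing Rx/Tx queues 0-1 and vector 1
	 * services Rx/Tx queues 2-3.
	 */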
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			i40evf_map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			i40evf_map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40evf_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
static void i40evf_netpoll(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &adapter->vsi.state))
		return;

	for (i = 0; i < q_vectors; i++)
		i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
}

#endif
/**
 * i40evf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
				       const cpumask_t *mask)
{
	struct i40e_q_vector *q_vector =
		container_of(notify, struct i40e_q_vector, affinity_notify);

	q_vector->affinity_mask = *mask;
}

/**
 * i40evf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void i40evf_irq_affinity_release(struct kref *ref) {}

/**
 * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
{
	int vector, err, q_vectors;
	int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num;

	i40evf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "i40evf-%s-%s-%d", basename,
				 "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "i40evf-%s-%s-%d", basename,
				 "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "i40evf-%s-%s-%d", basename,
				 "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  i40evf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = i40evf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   i40evf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* assign the mask for this irq */
		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}

/**
 * i40evf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &i40evf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

/**
 * i40evf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

/**
 * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * i40evf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void i40evf_configure_tx(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i);
}

/**
 * i40evf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void i40evf_configure_rx(struct i40evf_adapter *adapter)
{
	unsigned int rx_buf_len = I40E_RXBUFFER_2048;
	struct net_device *netdev = adapter->netdev;
	struct i40e_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu. On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (netdev->mtu <= ETH_DATA_LEN)
			rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;
	}
}

/**
 * i40evf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL
 **/
static struct
i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
	struct i40evf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (vlan == f->vlan)
			return f;
	}
	return NULL;
}

/**
 * i40evf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
	struct i40evf_vlan_filter *f = NULL;
	int count = 50;

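	/* Poll for the critical-section bit with udelay() rather than
	 * sleeping, and give up after ~50 iterations (about 50 usecs) so the
	 * caller is never stalled for long.
	 */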
	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		udelay(1);
		if (--count == 0)
			goto out;
	}

	f = i40evf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &adapter->vlan_filter_list);
		f->add = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
out:
	return f;
}

/**
 * i40evf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
	struct i40evf_vlan_filter *f;
	int count = 50;

	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		udelay(1);
		if (--count == 0)
			return;
	}

	f = i40evf_find_vlan(adapter, vlan);
	if (f) {
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
	}
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}

/**
 * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @vid: VLAN tag
 **/
static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
				  __always_unused __be16 proto, u16 vid)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	if (!VLAN_ALLOWED(adapter))
		return -EIO;
	if (i40evf_add_vlan(adapter, vid) == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @vid: VLAN tag
 **/
static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
				   __always_unused __be16 proto, u16 vid)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	if (VLAN_ALLOWED(adapter)) {
		i40evf_del_vlan(adapter, vid);
		return 0;
	}
	return -EIO;
}

/**
 * i40evf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL
 **/
static struct
i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
				      u8 *macaddr)
{
	struct i40evf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}

/**
 * i40evf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
				     u8 *macaddr)
{
	struct i40evf_mac_filter *f;
	int count = 50;

	if (!macaddr)
		return NULL;

	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		udelay(1);
		if (--count == 0)
			return NULL;
	}

	f = i40evf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f) {
			clear_bit(__I40EVF_IN_CRITICAL_TASK,
				  &adapter->crit_section);
			return NULL;
		}

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
	}

	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
	return f;
}

/**
 * i40evf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40evf_set_mac(struct net_device *netdev, void *p)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40e_hw *hw = &adapter->hw;
	struct i40evf_mac_filter *f;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
		return -EPERM;

	f = i40evf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = i40evf_add_filter(adapter, addr->sa_data);
	if (f) {
		ether_addr_copy(hw->mac.addr, addr->sa_data);
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
	}

	return (f == NULL) ? -ENOMEM : 0;
}

/**
 * i40evf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40evf_set_rx_mode(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40evf_mac_filter *f, *ftmp;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;
	int count = 50;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		i40evf_add_filter(adapter, uca->addr);
	}
	netdev_for_each_mc_addr(mca, netdev) {
		i40evf_add_filter(adapter, mca->addr);
	}

	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		udelay(1);
		if (--count == 0) {
			dev_err(&adapter->pdev->dev,
				"Failed to get lock in %s\n", __func__);
			return;
		}
	}
	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		netdev_for_each_mc_addr(mca, netdev)
			if (ether_addr_equal(mca->addr, f->macaddr))
				goto bottom_of_search_loop;

		netdev_for_each_uc_addr(uca, netdev)
			if (ether_addr_equal(uca->addr, f->macaddr))
				goto bottom_of_search_loop;

		for_each_dev_addr(netdev, ha)
			if (ether_addr_equal(ha->addr, f->macaddr))
				goto bottom_of_search_loop;

		if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
			goto bottom_of_search_loop;

		/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;

bottom_of_search_loop:
		continue;
	}

	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & I40EVF_FLAG_PROMISC_ON)
		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;

	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;

	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}

/**
 * i40evf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
{
	int q_idx;
	struct i40e_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}

/**
 * i40evf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
{
	int q_idx;
	struct i40e_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		napi_disable(&q_vector->napi);
	}
}

/**
 * i40evf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void i40evf_configure(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	i40evf_set_rx_mode(netdev);

	i40evf_configure_tx(adapter);
	i40evf_configure_rx(adapter);
	adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;

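	/* The queue configuration itself is requested from the PF via the
	 * aq_required flag set above and is carried out later; here we only
	 * pre-fill the Rx rings with buffers.
	 */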
	for (i = 0; i < adapter->num_active_queues; i++) {
		struct i40e_ring *ring = &adapter->rx_rings[i];

		i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
	}
}

/**
 * i40evf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 **/
static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
	adapter->state = __I40EVF_RUNNING;
	clear_bit(__I40E_DOWN, &adapter->vsi.state);

	i40evf_napi_enable_all(adapter);

	adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN;
	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}

/**
 * i40evf_down - Shutdown the connection processing
 * @adapter: board private structure
 **/
void i40evf_down(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct i40evf_mac_filter *f;

	if (adapter->state <= __I40EVF_DOWN_PENDING)
		return;

	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	i40evf_napi_disable_all(adapter);
	i40evf_irq_disable(adapter);

	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
	}
	/* remove all VLAN filters */
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		f->remove = true;
	}
	if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
	    adapter->state != __I40EVF_RESETTING) {
		/* cancel any current operation */
		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
	}

	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}

/**
 * i40evf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
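	/* pci_enable_msix_range() returns the number of vectors actually
	 * granted (between vector_threshold and vectors) or a negative
	 * errno on failure.
	 */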
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}

/**
 * i40evf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void i40evf_free_queues(struct i40evf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}

/**
 * i40evf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
{
	int i;

	adapter->tx_rings = kcalloc(adapter->num_active_queues,
				    sizeof(struct i40e_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(adapter->num_active_queues,
				    sizeof(struct i40e_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct i40e_ring *tx_ring;
		struct i40e_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->tx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF);
		if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF);
	}

	return 0;

err_out:
	i40evf_free_queues(adapter);
	return -ENOMEM;
}

/**
 * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's. So let's be conservative and only ask for
	 * (roughly) twice the number of vectors as there are CPU's.
	 */
	v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
	v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = i40evf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);
	return err;
}

/**
 * i40evf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
{
	struct i40e_aqc_get_set_rss_key_data *rss_key =
		(struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct i40e_hw *hw = &adapter->hw;
	int ret = 0;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			i40evf_stat_str(hw, ret),
			i40evf_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				    adapter->rss_lut, adapter->rss_lut_size);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			i40evf_stat_str(hw, ret),
			i40evf_aq_str(hw, hw->aq.asq_last_status));
	}

	return ret;
}

/**
 * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

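	/* Program the RSS key and lookup table one dword at a time through
	 * the VF's VFQF_HKEY and VFQF_HLUT register arrays.
	 */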
	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, I40E_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, I40E_VFQF_HLUT(i), dw[i]);

	i40e_flush(hw);

	return 0;
}

/**
 * i40evf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int i40evf_config_rss(struct i40evf_adapter *adapter)
{

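	/* Three paths, in order of preference: have the PF program RSS on
	 * our behalf, use the admin queue set_rss_key/lut commands, or fall
	 * back to writing the VF registers directly.
	 */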
Mitch Williams43a3d9b2016-04-12 08:30:44 -07001356 if (RSS_PF(adapter)) {
1357 adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
1358 I40EVF_FLAG_AQ_SET_RSS_KEY;
1359 return 0;
1360 } else if (RSS_AQ(adapter)) {
1361 return i40evf_config_rss_aq(adapter);
1362 } else {
1363 return i40evf_config_rss_reg(adapter);
1364 }
Helin Zhang90b02b42015-10-26 19:44:33 -04001365}
1366
1367/**
Helin Zhang2c86ac32015-10-27 16:15:06 -04001368 * i40evf_fill_rss_lut - Fill the lut with default values
Mitch Williams43a3d9b2016-04-12 08:30:44 -07001369 * @adapter: board private structure
Helin Zhang2c86ac32015-10-27 16:15:06 -04001370 **/
Mitch Williams43a3d9b2016-04-12 08:30:44 -07001371static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
Helin Zhang2c86ac32015-10-27 16:15:06 -04001372{
1373 u16 i;
1374
Mitch Williams43a3d9b2016-04-12 08:30:44 -07001375 for (i = 0; i < adapter->rss_lut_size; i++)
1376 adapter->rss_lut[i] = i % adapter->num_active_queues;
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04001377}
1378
1379/**
Helin Zhang96a81982015-10-26 19:44:31 -04001380 * i40evf_init_rss - Prepare for RSS
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04001381 * @adapter: board private structure
Helin Zhang2c86ac32015-10-27 16:15:06 -04001382 *
1383 * Return 0 on success, negative on failure
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04001384 **/
Helin Zhang2c86ac32015-10-27 16:15:06 -04001385static int i40evf_init_rss(struct i40evf_adapter *adapter)
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04001386{
1387 struct i40e_hw *hw = &adapter->hw;
Helin Zhang2c86ac32015-10-27 16:15:06 -04001388 int ret;
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04001389
Mitch Williams43a3d9b2016-04-12 08:30:44 -07001390 if (!RSS_PF(adapter)) {
1391 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1392 if (adapter->vf_res->vf_offload_flags &
1393 I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1394 adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
1395 else
1396 adapter->hena = I40E_DEFAULT_RSS_HENA;
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04001397
Mitch Williams43a3d9b2016-04-12 08:30:44 -07001398 wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
1399 wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
1400 }
Helin Zhang66f9af852015-10-26 19:44:34 -04001401
Mitch Williams43a3d9b2016-04-12 08:30:44 -07001402 i40evf_fill_rss_lut(adapter);
Helin Zhang66f9af852015-10-26 19:44:34 -04001403
Mitch Williams43a3d9b2016-04-12 08:30:44 -07001404 netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
1405 ret = i40evf_config_rss(adapter);
Helin Zhang2c86ac32015-10-27 16:15:06 -04001406
1407 return ret;
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04001408}
1409
1410/**
Greg Rose5eae00c2013-12-21 06:12:45 +00001411 * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
1412 * @adapter: board private structure to initialize
1413 *
1414 * We allocate one q_vector per queue interrupt. If allocation fails we
1415 * return -ENOMEM.
1416 **/
1417static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
1418{
Mitch Williams7d96ba12015-10-26 19:44:39 -04001419 int q_idx = 0, num_q_vectors;
Greg Rose5eae00c2013-12-21 06:12:45 +00001420 struct i40e_q_vector *q_vector;
1421
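	/* One MSI-X vector (vector 0) is reserved for the admin queue and
	 * other non-queue interrupts; each remaining vector gets a q_vector.
	 */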
1422 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
Mitch Williams0dd438d2015-10-26 19:44:40 -04001423 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
Mitch Williams7d96ba12015-10-26 19:44:39 -04001424 GFP_KERNEL);
1425 if (!adapter->q_vectors)
Alan Cox311f23e2016-03-01 16:02:15 -08001426 return -ENOMEM;
Greg Rose5eae00c2013-12-21 06:12:45 +00001427
1428 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
Mitch Williams7d96ba12015-10-26 19:44:39 -04001429 q_vector = &adapter->q_vectors[q_idx];
Greg Rose5eae00c2013-12-21 06:12:45 +00001430 q_vector->adapter = adapter;
1431 q_vector->vsi = &adapter->vsi;
1432 q_vector->v_idx = q_idx;
1433 netif_napi_add(adapter->netdev, &q_vector->napi,
Mitch Williams75a64432014-11-11 20:02:42 +00001434 i40evf_napi_poll, NAPI_POLL_WEIGHT);
Greg Rose5eae00c2013-12-21 06:12:45 +00001435 }
1436
1437 return 0;
Greg Rose5eae00c2013-12-21 06:12:45 +00001438}
1439
1440/**
1441 * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
1442 * @adapter: board private structure to initialize
1443 *
1444 * This function frees the memory allocated to the q_vectors. In addition if
1445 * NAPI is enabled it will delete any references to the NAPI struct prior
1446 * to freeing the q_vector.
1447 **/
1448static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
1449{
1450 int q_idx, num_q_vectors;
1451 int napi_vectors;
1452
Jacob Kelleref4603e2016-11-08 13:05:08 -08001453 if (!adapter->q_vectors)
1454 return;
1455
Greg Rose5eae00c2013-12-21 06:12:45 +00001456 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
Mitch Williamscc052922014-10-25 03:24:34 +00001457 napi_vectors = adapter->num_active_queues;
Greg Rose5eae00c2013-12-21 06:12:45 +00001458
1459 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
Mitch Williams7d96ba12015-10-26 19:44:39 -04001460 struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx];
Greg Rose5eae00c2013-12-21 06:12:45 +00001461 if (q_idx < napi_vectors)
1462 netif_napi_del(&q_vector->napi);
Greg Rose5eae00c2013-12-21 06:12:45 +00001463 }
Mitch Williams7d96ba12015-10-26 19:44:39 -04001464 kfree(adapter->q_vectors);
Jacob Kelleref4603e2016-11-08 13:05:08 -08001465 adapter->q_vectors = NULL;
Greg Rose5eae00c2013-12-21 06:12:45 +00001466}
1467
1468/**
1469 * i40evf_reset_interrupt_capability - Reset MSIX setup
1470 * @adapter: board private structure
1471 *
1472 **/
1473void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
1474{
Alan Brady47d2a5d2016-11-08 13:05:05 -08001475 if (!adapter->msix_entries)
1476 return;
1477
Greg Rose5eae00c2013-12-21 06:12:45 +00001478 pci_disable_msix(adapter->pdev);
1479 kfree(adapter->msix_entries);
1480 adapter->msix_entries = NULL;
Greg Rose5eae00c2013-12-21 06:12:45 +00001481}
1482
1483/**
1484 * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
1485 * @adapter: board private structure to initialize
1486 *
1487 **/
1488int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
1489{
1490 int err;
1491
Jacob Keller62fe2a82016-07-27 12:02:33 -07001492 rtnl_lock();
Greg Rose5eae00c2013-12-21 06:12:45 +00001493 err = i40evf_set_interrupt_capability(adapter);
Jacob Keller62fe2a82016-07-27 12:02:33 -07001494 rtnl_unlock();
Greg Rose5eae00c2013-12-21 06:12:45 +00001495 if (err) {
1496 dev_err(&adapter->pdev->dev,
1497 "Unable to setup interrupt capabilities\n");
1498 goto err_set_interrupt;
1499 }
1500
1501 err = i40evf_alloc_q_vectors(adapter);
1502 if (err) {
1503 dev_err(&adapter->pdev->dev,
1504 "Unable to allocate memory for queue vectors\n");
1505 goto err_alloc_q_vectors;
1506 }
1507
1508 err = i40evf_alloc_queues(adapter);
1509 if (err) {
1510 dev_err(&adapter->pdev->dev,
1511 "Unable to allocate memory for queues\n");
1512 goto err_alloc_queues;
1513 }
1514
1515 	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u\n",
Mitch Williams75a64432014-11-11 20:02:42 +00001516 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1517 adapter->num_active_queues);
Greg Rose5eae00c2013-12-21 06:12:45 +00001518
1519 return 0;
1520err_alloc_queues:
1521 i40evf_free_q_vectors(adapter);
1522err_alloc_q_vectors:
1523 i40evf_reset_interrupt_capability(adapter);
1524err_set_interrupt:
1525 return err;
1526}
1527
1528/**
Mitch Williams43a3d9b2016-04-12 08:30:44 -07001529 * i40evf_free_rss - Free memory used by RSS structs
1530 * @adapter: board private structure
Helin Zhang66f9af852015-10-26 19:44:34 -04001531 **/
Mitch Williams43a3d9b2016-04-12 08:30:44 -07001532static void i40evf_free_rss(struct i40evf_adapter *adapter)
Helin Zhang66f9af852015-10-26 19:44:34 -04001533{
Mitch Williams43a3d9b2016-04-12 08:30:44 -07001534 kfree(adapter->rss_key);
1535 adapter->rss_key = NULL;
Helin Zhang66f9af852015-10-26 19:44:34 -04001536
Mitch Williams43a3d9b2016-04-12 08:30:44 -07001537 kfree(adapter->rss_lut);
1538 adapter->rss_lut = NULL;
Helin Zhang66f9af852015-10-26 19:44:34 -04001539}
1540
1541/**
Greg Rose5eae00c2013-12-21 06:12:45 +00001542 * i40evf_watchdog_timer - Periodic call-back timer
1543 * @data: pointer to adapter disguised as unsigned long
1544 **/
1545static void i40evf_watchdog_timer(unsigned long data)
1546{
1547 struct i40evf_adapter *adapter = (struct i40evf_adapter *)data;
Mitch Williams75a64432014-11-11 20:02:42 +00001548
Greg Rose5eae00c2013-12-21 06:12:45 +00001549 schedule_work(&adapter->watchdog_task);
1550 /* timer will be rescheduled in watchdog task */
1551}
1552
1553/**
1554 * i40evf_watchdog_task - Periodic call-back task
1555 * @work: pointer to work_struct
1556 **/
1557static void i40evf_watchdog_task(struct work_struct *work)
1558{
1559 struct i40evf_adapter *adapter = container_of(work,
Mitch Williams75a64432014-11-11 20:02:42 +00001560 struct i40evf_adapter,
1561 watchdog_task);
Greg Rose5eae00c2013-12-21 06:12:45 +00001562 struct i40e_hw *hw = &adapter->hw;
Mitch Williamsee5c1e92015-08-28 17:55:53 -04001563 u32 reg_val;
Greg Rose5eae00c2013-12-21 06:12:45 +00001564
Greg Rose5eae00c2013-12-21 06:12:45 +00001565 if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
Mitch Williamsef8693e2014-02-13 03:48:53 -08001566 goto restart_watchdog;
1567
1568 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
Mitch Williamsee5c1e92015-08-28 17:55:53 -04001569 reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
1570 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1571 if ((reg_val == I40E_VFR_VFACTIVE) ||
1572 (reg_val == I40E_VFR_COMPLETED)) {
Mitch Williamsef8693e2014-02-13 03:48:53 -08001573 /* A chance for redemption! */
1574 dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
1575 adapter->state = __I40EVF_STARTUP;
1576 adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
1577 schedule_delayed_work(&adapter->init_task, 10);
1578 clear_bit(__I40EVF_IN_CRITICAL_TASK,
1579 &adapter->crit_section);
1580 /* Don't reschedule the watchdog, since we've restarted
1581 * the init task. When init_task contacts the PF and
1582 * gets everything set up again, it'll restart the
1583 * watchdog for us. Down, boy. Sit. Stay. Woof.
1584 */
1585 return;
1586 }
Mitch Williamsef8693e2014-02-13 03:48:53 -08001587 adapter->aq_required = 0;
1588 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
1589 goto watchdog_done;
1590 }
1591
1592 if ((adapter->state < __I40EVF_DOWN) ||
1593 (adapter->flags & I40EVF_FLAG_RESET_PENDING))
Greg Rose5eae00c2013-12-21 06:12:45 +00001594 goto watchdog_done;
1595
Mitch Williamsef8693e2014-02-13 03:48:53 -08001596 /* check for reset */
Mitch Williamsee5c1e92015-08-28 17:55:53 -04001597 reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
1598 if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
Greg Rose5eae00c2013-12-21 06:12:45 +00001599 adapter->state = __I40EVF_RESETTING;
Mitch Williamsef8693e2014-02-13 03:48:53 -08001600 adapter->flags |= I40EVF_FLAG_RESET_PENDING;
Mitch Williams249c8b82014-05-10 04:49:04 +00001601 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
Greg Rose5eae00c2013-12-21 06:12:45 +00001602 schedule_work(&adapter->reset_task);
Mitch Williamsef8693e2014-02-13 03:48:53 -08001603 adapter->aq_required = 0;
1604 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
Greg Rose5eae00c2013-12-21 06:12:45 +00001605 goto watchdog_done;
1606 }
1607
1608 /* Process admin queue tasks. After init, everything gets done
1609 * here so we don't race on the admin queue.
1610 */
Mitch Williamsed636962015-04-07 19:45:32 -04001611 if (adapter->current_op) {
Mitch A Williams0758e7cb2014-12-09 08:53:08 +00001612 if (!i40evf_asq_done(hw)) {
1613 dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
1614 i40evf_send_api_ver(adapter);
1615 }
Greg Rose5eae00c2013-12-21 06:12:45 +00001616 goto watchdog_done;
Mitch A Williams0758e7cb2014-12-09 08:53:08 +00001617 }
Mitch Williamse6d038d2015-06-04 16:23:58 -04001618 if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
1619 i40evf_send_vf_config_msg(adapter);
1620 goto watchdog_done;
1621 }
Greg Rose5eae00c2013-12-21 06:12:45 +00001622
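	/* The blocks below each service one aq_required flag and then jump
	 * to watchdog_done, so admin queue operations are issued one per
	 * watchdog pass.
	 */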
Mitch Williamse284fc82015-03-27 00:12:09 -07001623 if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
1624 i40evf_disable_queues(adapter);
1625 goto watchdog_done;
1626 }
1627
Greg Rose5eae00c2013-12-21 06:12:45 +00001628 if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
1629 i40evf_map_queues(adapter);
1630 goto watchdog_done;
1631 }
1632
1633 if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
1634 i40evf_add_ether_addrs(adapter);
1635 goto watchdog_done;
1636 }
1637
1638 if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
1639 i40evf_add_vlans(adapter);
1640 goto watchdog_done;
1641 }
1642
1643 if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
1644 i40evf_del_ether_addrs(adapter);
1645 goto watchdog_done;
1646 }
1647
1648 if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
1649 i40evf_del_vlans(adapter);
1650 goto watchdog_done;
1651 }
1652
Greg Rose5eae00c2013-12-21 06:12:45 +00001653 if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
1654 i40evf_configure_queues(adapter);
1655 goto watchdog_done;
1656 }
1657
1658 if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
1659 i40evf_enable_queues(adapter);
1660 goto watchdog_done;
1661 }
1662
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04001663 if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
1664 /* This message goes straight to the firmware, not the
1665 * PF, so we don't have to set current_op as we will
1666 * not get a response through the ARQ.
1667 */
Helin Zhang96a81982015-10-26 19:44:31 -04001668 i40evf_init_rss(adapter);
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04001669 adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
1670 goto watchdog_done;
1671 }
Mitch Williams43a3d9b2016-04-12 08:30:44 -07001672 if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
1673 i40evf_get_hena(adapter);
1674 goto watchdog_done;
1675 }
1676 if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
1677 i40evf_set_hena(adapter);
1678 goto watchdog_done;
1679 }
1680 if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
1681 i40evf_set_rss_key(adapter);
1682 goto watchdog_done;
1683 }
1684 if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
1685 i40evf_set_rss_lut(adapter);
1686 goto watchdog_done;
1687 }
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04001688
Anjali Singhai Jain47d34832016-04-12 08:30:52 -07001689 if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
1690 i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC |
1691 I40E_FLAG_VF_MULTICAST_PROMISC);
1692 goto watchdog_done;
1693 }
1694
Anjali Singhai Jainf42a5c72016-05-03 15:13:10 -07001695 if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
1696 i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC);
1697 goto watchdog_done;
1698 }
1699
1700 if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
1701 (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
Anjali Singhai Jain47d34832016-04-12 08:30:52 -07001702 i40evf_set_promiscuous(adapter, 0);
1703 goto watchdog_done;
1704 }
Mitch Williamsed0e8942017-01-24 10:23:59 -08001705 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
Anjali Singhai Jain47d34832016-04-12 08:30:52 -07001706
Greg Rose5eae00c2013-12-21 06:12:45 +00001707 if (adapter->state == __I40EVF_RUNNING)
1708 i40evf_request_stats(adapter);
Greg Rose5eae00c2013-12-21 06:12:45 +00001709watchdog_done:
Mitch A Williams4870e172014-12-09 08:53:06 +00001710 if (adapter->state == __I40EVF_RUNNING) {
1711 i40evf_irq_enable_queues(adapter, ~0);
1712 i40evf_fire_sw_int(adapter, 0xFF);
1713 } else {
1714 i40evf_fire_sw_int(adapter, 0x1);
1715 }
1716
Mitch Williamsef8693e2014-02-13 03:48:53 -08001717 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1718restart_watchdog:
Ashish Shahd3e2edb2014-08-01 13:27:12 -07001719 if (adapter->state == __I40EVF_REMOVE)
1720 return;
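	/* Poll quickly (20 ms) while admin queue work is pending, otherwise
	 * drop back to a 2 second service interval.
	 */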
Greg Rose5eae00c2013-12-21 06:12:45 +00001721 if (adapter->aq_required)
1722 mod_timer(&adapter->watchdog_timer,
1723 jiffies + msecs_to_jiffies(20));
1724 else
1725 mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
Greg Rose5eae00c2013-12-21 06:12:45 +00001726 schedule_work(&adapter->adminq_task);
1727}
1728
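/**
 * i40evf_disable_vf - disable the VF after a failed reset
 * @adapter: board private structure
 *
 * Called when the PF never signals that a reset completed. Tears down the
 * data path, drops all MAC and VLAN filters, frees interrupts and queue
 * memory, and leaves the VF in the __I40EVF_DOWN state.
 **/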
Joe Perchesdedecb62016-11-01 15:35:14 -07001729static void i40evf_disable_vf(struct i40evf_adapter *adapter)
1730{
1731 struct i40evf_mac_filter *f, *ftmp;
1732 struct i40evf_vlan_filter *fv, *fvtmp;
1733
1734 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
1735
1736 if (netif_running(adapter->netdev)) {
1737 set_bit(__I40E_DOWN, &adapter->vsi.state);
1738 netif_carrier_off(adapter->netdev);
1739 netif_tx_disable(adapter->netdev);
1740 adapter->link_up = false;
1741 i40evf_napi_disable_all(adapter);
1742 i40evf_irq_disable(adapter);
1743 i40evf_free_traffic_irqs(adapter);
1744 i40evf_free_all_tx_resources(adapter);
1745 i40evf_free_all_rx_resources(adapter);
1746 }
1747
1748 /* Delete all of the filters, both MAC and VLAN. */
1749 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
1750 list_del(&f->list);
1751 kfree(f);
1752 }
1753
1754 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
1755 list_del(&fv->list);
1756 kfree(fv);
1757 }
1758
1759 i40evf_free_misc_irq(adapter);
1760 i40evf_reset_interrupt_capability(adapter);
1761 i40evf_free_queues(adapter);
1762 i40evf_free_q_vectors(adapter);
1763 kfree(adapter->vf_res);
1764 i40evf_shutdown_adminq(&adapter->hw);
1765 adapter->netdev->flags &= ~IFF_UP;
1766 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1767 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
1768 adapter->state = __I40EVF_DOWN;
1769 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
1770}
1771
Mitch Williams67c818a2015-06-19 08:56:30 -07001772#define I40EVF_RESET_WAIT_MS 10
1773#define I40EVF_RESET_WAIT_COUNT 500
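/* Worst case the reset task waits roughly 5 seconds
 * (I40EVF_RESET_WAIT_COUNT * I40EVF_RESET_WAIT_MS) for the PF to bring the
 * VF back before giving up and disabling it.
 */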
Greg Rose5eae00c2013-12-21 06:12:45 +00001774/**
1775 * i40evf_reset_task - Call-back task to handle hardware reset
1776 * @work: pointer to work_struct
1777 *
1778 * During reset we need to shut down and reinitialize the admin queue
1779 * before we can use it to communicate with the PF again. We also clear
1780 * and reinit the rings because that context is lost as well.
1781 **/
1782static void i40evf_reset_task(struct work_struct *work)
1783{
Mitch Williamsef8693e2014-02-13 03:48:53 -08001784 struct i40evf_adapter *adapter = container_of(work,
1785 struct i40evf_adapter,
1786 reset_task);
Mitch Williamsac833bb2015-01-29 07:17:19 +00001787 struct net_device *netdev = adapter->netdev;
Greg Rose5eae00c2013-12-21 06:12:45 +00001788 struct i40e_hw *hw = &adapter->hw;
Mitch Williams40d01362015-10-01 14:37:37 -04001789 struct i40evf_vlan_filter *vlf;
Mitch Williamsac833bb2015-01-29 07:17:19 +00001790 struct i40evf_mac_filter *f;
Mitch Williamsee5c1e92015-08-28 17:55:53 -04001791 u32 reg_val;
Mitch Williamsac833bb2015-01-29 07:17:19 +00001792 int i = 0, err;
Greg Rose5eae00c2013-12-21 06:12:45 +00001793
Mitch Williamsed0e8942017-01-24 10:23:59 -08001794 while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
Greg Rose5eae00c2013-12-21 06:12:45 +00001795 &adapter->crit_section))
Neerav Parikhf98a2002014-09-13 07:40:44 +00001796 usleep_range(500, 1000);
Mitch Williamsed0e8942017-01-24 10:23:59 -08001797 if (CLIENT_ENABLED(adapter)) {
1798 adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN |
1799 I40EVF_FLAG_CLIENT_NEEDS_CLOSE |
1800 I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
1801 I40EVF_FLAG_SERVICE_CLIENT_REQUESTED);
1802 cancel_delayed_work_sync(&adapter->client_task);
1803 i40evf_notify_client_close(&adapter->vsi, true);
1804 }
Mitch Williams67c818a2015-06-19 08:56:30 -07001805 i40evf_misc_irq_disable(adapter);
Mitch Williams3526d802014-03-06 08:59:56 +00001806 if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
Mitch Williams67c818a2015-06-19 08:56:30 -07001807 adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
1808 /* Restart the AQ here. If we have been reset but didn't
1809 * detect it, or if the PF had to reinit, our AQ will be hosed.
1810 */
1811 i40evf_shutdown_adminq(hw);
1812 i40evf_init_adminq(hw);
Mitch Williams3526d802014-03-06 08:59:56 +00001813 i40evf_request_reset(adapter);
1814 }
Mitch Williams67c818a2015-06-19 08:56:30 -07001815 adapter->flags |= I40EVF_FLAG_RESET_PENDING;
Mitch Williams3526d802014-03-06 08:59:56 +00001816
Mitch Williamsef8693e2014-02-13 03:48:53 -08001817 /* poll until we see the reset actually happen */
1818 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
Mitch Williamsee5c1e92015-08-28 17:55:53 -04001819 reg_val = rd32(hw, I40E_VF_ARQLEN1) &
1820 I40E_VF_ARQLEN1_ARQENABLE_MASK;
1821 if (!reg_val)
Greg Rose5eae00c2013-12-21 06:12:45 +00001822 break;
Mitch Williamsee5c1e92015-08-28 17:55:53 -04001823 usleep_range(5000, 10000);
Greg Rose5eae00c2013-12-21 06:12:45 +00001824 }
Mitch Williamsef8693e2014-02-13 03:48:53 -08001825 if (i == I40EVF_RESET_WAIT_COUNT) {
Mitch Williams67c818a2015-06-19 08:56:30 -07001826 dev_info(&adapter->pdev->dev, "Never saw reset\n");
Mitch Williamsef8693e2014-02-13 03:48:53 -08001827 goto continue_reset; /* act like the reset happened */
1828 }
1829
1830 /* wait until the reset is complete and the PF is responding to us */
1831 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
Jacob Keller7d3f04a2016-10-05 09:30:45 -07001832 /* sleep first to make sure a minimum wait time is met */
1833 msleep(I40EVF_RESET_WAIT_MS);
1834
Mitch Williamsee5c1e92015-08-28 17:55:53 -04001835 reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
1836 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1837 if (reg_val == I40E_VFR_VFACTIVE)
Mitch Williamsef8693e2014-02-13 03:48:53 -08001838 break;
Mitch Williamsef8693e2014-02-13 03:48:53 -08001839 }
Jacob Keller7d3f04a2016-10-05 09:30:45 -07001840
Mitch Williams509a4472015-12-23 12:05:52 -08001841 pci_set_master(adapter->pdev);
Jacob Keller7d3f04a2016-10-05 09:30:45 -07001842
Mitch Williamsef8693e2014-02-13 03:48:53 -08001843 if (i == I40EVF_RESET_WAIT_COUNT) {
Mitch Williams80e72892014-05-10 04:49:06 +00001844 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
Mitch Williamsee5c1e92015-08-28 17:55:53 -04001845 reg_val);
Joe Perchesdedecb62016-11-01 15:35:14 -07001846 i40evf_disable_vf(adapter);
Mitch Williamsed0e8942017-01-24 10:23:59 -08001847 clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
Mitch Williamsef8693e2014-02-13 03:48:53 -08001848 return; /* Do not attempt to reinit. It's dead, Jim. */
Greg Rose5eae00c2013-12-21 06:12:45 +00001849 }
Mitch Williamsef8693e2014-02-13 03:48:53 -08001850
1851continue_reset:
Mitch Williams67c818a2015-06-19 08:56:30 -07001852 if (netif_running(adapter->netdev)) {
1853 netif_carrier_off(netdev);
1854 netif_tx_stop_all_queues(netdev);
Sridhar Samudrala3f341ac2016-09-01 22:27:27 +02001855 adapter->link_up = false;
Mitch Williams67c818a2015-06-19 08:56:30 -07001856 i40evf_napi_disable_all(adapter);
1857 }
Mitch Williamsac833bb2015-01-29 07:17:19 +00001858 i40evf_irq_disable(adapter);
Mitch Williamsac833bb2015-01-29 07:17:19 +00001859
Greg Rose5eae00c2013-12-21 06:12:45 +00001860 adapter->state = __I40EVF_RESETTING;
Mitch Williams67c818a2015-06-19 08:56:30 -07001861 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
1862
1863 /* free the Tx/Rx rings and descriptors, might be better to just
1864 * re-use them sometime in the future
1865 */
1866 i40evf_free_all_rx_resources(adapter);
1867 i40evf_free_all_tx_resources(adapter);
Greg Rose5eae00c2013-12-21 06:12:45 +00001868
1869 /* kill and reinit the admin queue */
Lihong Yang903e6832016-09-06 18:05:05 -07001870 i40evf_shutdown_adminq(hw);
Mitch Williamsac833bb2015-01-29 07:17:19 +00001871 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
Greg Rose5eae00c2013-12-21 06:12:45 +00001872 err = i40evf_init_adminq(hw);
1873 if (err)
Mitch Williamsac833bb2015-01-29 07:17:19 +00001874 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
1875 err);
Greg Rose5eae00c2013-12-21 06:12:45 +00001876
Mitch Williamse6d038d2015-06-04 16:23:58 -04001877 adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG;
1878 adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
Mitch Williamsac833bb2015-01-29 07:17:19 +00001879
1880 /* re-add all MAC filters */
1881 list_for_each_entry(f, &adapter->mac_filter_list, list) {
1882 f->add = true;
1883 }
1884 /* re-add all VLAN filters */
Mitch Williams40d01362015-10-01 14:37:37 -04001885 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
1886 vlf->add = true;
Mitch Williamsac833bb2015-01-29 07:17:19 +00001887 }
Mitch Williamse6d038d2015-06-04 16:23:58 -04001888 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
Mitch Williamsac833bb2015-01-29 07:17:19 +00001889 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
Greg Rose5eae00c2013-12-21 06:12:45 +00001890 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
Mitch Williamsed0e8942017-01-24 10:23:59 -08001891 clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
Mitch Williams67c818a2015-06-19 08:56:30 -07001892 i40evf_misc_irq_enable(adapter);
Greg Rose5eae00c2013-12-21 06:12:45 +00001893
1894 mod_timer(&adapter->watchdog_timer, jiffies + 2);
1895
1896 if (netif_running(adapter->netdev)) {
1897 /* allocate transmit descriptors */
1898 err = i40evf_setup_all_tx_resources(adapter);
1899 if (err)
1900 goto reset_err;
1901
1902 /* allocate receive descriptors */
1903 err = i40evf_setup_all_rx_resources(adapter);
1904 if (err)
1905 goto reset_err;
1906
1907 i40evf_configure(adapter);
1908
Bimmy Pujaricb130a02016-09-06 18:05:03 -07001909 i40evf_up_complete(adapter);
Greg Rose5eae00c2013-12-21 06:12:45 +00001910
1911 i40evf_irq_enable(adapter, true);
Mitch Williams67c818a2015-06-19 08:56:30 -07001912 } else {
1913 adapter->state = __I40EVF_DOWN;
Greg Rose5eae00c2013-12-21 06:12:45 +00001914 }
Mitch Williams67c818a2015-06-19 08:56:30 -07001915
Greg Rose5eae00c2013-12-21 06:12:45 +00001916 return;
1917reset_err:
Mitch Williams80e72892014-05-10 04:49:06 +00001918 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
Greg Rose5eae00c2013-12-21 06:12:45 +00001919 i40evf_close(adapter->netdev);
1920}
1921
1922/**
1923 * i40evf_adminq_task - worker thread to clean the admin queue
1924 * @work: pointer to work_struct containing our data
1925 **/
1926static void i40evf_adminq_task(struct work_struct *work)
1927{
1928 struct i40evf_adapter *adapter =
1929 container_of(work, struct i40evf_adapter, adminq_task);
1930 struct i40e_hw *hw = &adapter->hw;
1931 struct i40e_arq_event_info event;
1932 struct i40e_virtchnl_msg *v_msg;
1933 i40e_status ret;
Mitch Williams912257e2014-05-22 06:32:07 +00001934 u32 val, oldval;
Greg Rose5eae00c2013-12-21 06:12:45 +00001935 u16 pending;
1936
Mitch Williamsef8693e2014-02-13 03:48:53 -08001937 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
Mitch A Williams72354482014-12-09 08:53:07 +00001938 goto out;
Mitch Williamsef8693e2014-02-13 03:48:53 -08001939
Mitch Williams1001dc32014-11-11 20:02:19 +00001940 event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
1941 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
Mitch Williams249c8b82014-05-10 04:49:04 +00001942 if (!event.msg_buf)
Mitch A Williams72354482014-12-09 08:53:07 +00001943 goto out;
Mitch Williams249c8b82014-05-10 04:49:04 +00001944
Greg Rose5eae00c2013-12-21 06:12:45 +00001945 v_msg = (struct i40e_virtchnl_msg *)&event.desc;
1946 do {
1947 ret = i40evf_clean_arq_element(hw, &event, &pending);
Mitch Williams8b011eb2015-01-09 11:18:17 +00001948 if (ret || !v_msg->v_opcode)
Greg Rose5eae00c2013-12-21 06:12:45 +00001949 break; /* No event to process or error cleaning ARQ */
1950
1951 i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
1952 v_msg->v_retval, event.msg_buf,
Mitch Williams1001dc32014-11-11 20:02:19 +00001953 event.msg_len);
Mitch Williams75a64432014-11-11 20:02:42 +00001954 if (pending != 0)
Greg Rose5eae00c2013-12-21 06:12:45 +00001955 memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
Greg Rose5eae00c2013-12-21 06:12:45 +00001956 } while (pending);
1957
Mitch Williams67c818a2015-06-19 08:56:30 -07001958 if ((adapter->flags &
1959 (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) ||
1960 adapter->state == __I40EVF_RESETTING)
1961 goto freedom;
1962
Mitch Williams912257e2014-05-22 06:32:07 +00001963 /* check for error indications */
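	/* any latched error bits are cleared by masking them out of the
	 * value we read and writing it back to the length register
	 */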
1964 val = rd32(hw, hw->aq.arq.len);
Mitch Williams19b73d82016-03-10 14:59:49 -08001965 if (val == 0xdeadbeef) /* indicates device in reset */
1966 goto freedom;
Mitch Williams912257e2014-05-22 06:32:07 +00001967 oldval = val;
Anjali Singhai Jainb1f33662015-07-10 19:36:05 -04001968 if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
Mitch Williams912257e2014-05-22 06:32:07 +00001969 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
Anjali Singhai Jainb1f33662015-07-10 19:36:05 -04001970 val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
Mitch Williams912257e2014-05-22 06:32:07 +00001971 }
Anjali Singhai Jainb1f33662015-07-10 19:36:05 -04001972 if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
Mitch Williams912257e2014-05-22 06:32:07 +00001973 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
Anjali Singhai Jainb1f33662015-07-10 19:36:05 -04001974 val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
Mitch Williams912257e2014-05-22 06:32:07 +00001975 }
Anjali Singhai Jainb1f33662015-07-10 19:36:05 -04001976 if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
Mitch Williams912257e2014-05-22 06:32:07 +00001977 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
Anjali Singhai Jainb1f33662015-07-10 19:36:05 -04001978 val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
Mitch Williams912257e2014-05-22 06:32:07 +00001979 }
1980 if (oldval != val)
1981 wr32(hw, hw->aq.arq.len, val);
1982
1983 val = rd32(hw, hw->aq.asq.len);
1984 oldval = val;
Anjali Singhai Jainb1f33662015-07-10 19:36:05 -04001985 if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) {
Mitch Williams912257e2014-05-22 06:32:07 +00001986 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
Anjali Singhai Jainb1f33662015-07-10 19:36:05 -04001987 val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
Mitch Williams912257e2014-05-22 06:32:07 +00001988 }
Anjali Singhai Jainb1f33662015-07-10 19:36:05 -04001989 if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
Mitch Williams912257e2014-05-22 06:32:07 +00001990 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
Anjali Singhai Jainb1f33662015-07-10 19:36:05 -04001991 val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
Mitch Williams912257e2014-05-22 06:32:07 +00001992 }
Anjali Singhai Jainb1f33662015-07-10 19:36:05 -04001993 if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
Mitch Williams912257e2014-05-22 06:32:07 +00001994 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
Anjali Singhai Jainb1f33662015-07-10 19:36:05 -04001995 val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
Mitch Williams912257e2014-05-22 06:32:07 +00001996 }
1997 if (oldval != val)
1998 wr32(hw, hw->aq.asq.len, val);
1999
Mitch Williams67c818a2015-06-19 08:56:30 -07002000freedom:
Mitch A Williams72354482014-12-09 08:53:07 +00002001 kfree(event.msg_buf);
2002out:
Greg Rose5eae00c2013-12-21 06:12:45 +00002003 /* re-enable Admin queue interrupt cause */
2004 i40evf_misc_irq_enable(adapter);
Greg Rose5eae00c2013-12-21 06:12:45 +00002005}
2006
2007/**
Mitch Williamsed0e8942017-01-24 10:23:59 -08002008 * i40evf_client_task - worker thread to perform client work
2009 * @work: pointer to work_struct containing our data
2010 *
2011 * This task handles client interactions. Because client calls can be
2012 * reentrant, we can't handle them in the watchdog.
2013 **/
2014static void i40evf_client_task(struct work_struct *work)
2015{
2016 struct i40evf_adapter *adapter =
2017 container_of(work, struct i40evf_adapter, client_task.work);
2018
2019 /* If we can't get the client bit, just give up. We'll be rescheduled
2020 * later.
2021 */
2022
2023 if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
2024 return;
2025
2026 if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2027 i40evf_client_subtask(adapter);
2028 adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
2029 goto out;
2030 }
2031 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
2032 i40evf_notify_client_close(&adapter->vsi, false);
2033 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
2034 goto out;
2035 }
2036 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
2037 i40evf_notify_client_open(&adapter->vsi);
2038 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
2039 goto out;
2040 }
2041 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2042 i40evf_notify_client_l2_params(&adapter->vsi);
2043 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2044 }
2045out:
2046 clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
2047}
2048
2049/**
Greg Rose5eae00c2013-12-21 06:12:45 +00002050 * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
2051 * @adapter: board private structure
2052 *
2053 * Free all transmit software resources
2054 **/
Mitch Williamse284fc82015-03-27 00:12:09 -07002055void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
Greg Rose5eae00c2013-12-21 06:12:45 +00002056{
2057 int i;
2058
Mitch Williamsfdb47ae2015-11-19 11:34:18 -08002059 if (!adapter->tx_rings)
2060 return;
2061
Mitch Williamscc052922014-10-25 03:24:34 +00002062 for (i = 0; i < adapter->num_active_queues; i++)
Mitch Williams0dd438d2015-10-26 19:44:40 -04002063 if (adapter->tx_rings[i].desc)
2064 i40evf_free_tx_resources(&adapter->tx_rings[i]);
Greg Rose5eae00c2013-12-21 06:12:45 +00002065}
2066
2067/**
2068 * i40evf_setup_all_tx_resources - allocate all queues Tx resources
2069 * @adapter: board private structure
2070 *
2071 * If this function returns with an error, then it's possible one or
2072  * more of the rings are populated (while the rest are not). It is the
2073  * caller's duty to clean up those orphaned rings.
2074 *
2075 * Return 0 on success, negative on failure
2076 **/
2077static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
2078{
2079 int i, err = 0;
2080
Mitch Williamscc052922014-10-25 03:24:34 +00002081 for (i = 0; i < adapter->num_active_queues; i++) {
Mitch Williams0dd438d2015-10-26 19:44:40 -04002082 adapter->tx_rings[i].count = adapter->tx_desc_count;
2083 err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]);
Greg Rose5eae00c2013-12-21 06:12:45 +00002084 if (!err)
2085 continue;
2086 dev_err(&adapter->pdev->dev,
Shannon Nelsonfb43201f2015-08-26 15:14:17 -04002087 "Allocation for Tx Queue %u failed\n", i);
Greg Rose5eae00c2013-12-21 06:12:45 +00002088 break;
2089 }
2090
2091 return err;
2092}
2093
2094/**
2095 * i40evf_setup_all_rx_resources - allocate all queues Rx resources
2096 * @adapter: board private structure
2097 *
2098 * If this function returns with an error, then it's possible one or
2099  * more of the rings are populated (while the rest are not). It is the
2100  * caller's duty to clean up those orphaned rings.
2101 *
2102 * Return 0 on success, negative on failure
2103 **/
2104static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
2105{
2106 int i, err = 0;
2107
Mitch Williamscc052922014-10-25 03:24:34 +00002108 for (i = 0; i < adapter->num_active_queues; i++) {
Mitch Williams0dd438d2015-10-26 19:44:40 -04002109 adapter->rx_rings[i].count = adapter->rx_desc_count;
2110 err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]);
Greg Rose5eae00c2013-12-21 06:12:45 +00002111 if (!err)
2112 continue;
2113 dev_err(&adapter->pdev->dev,
Shannon Nelsonfb43201f2015-08-26 15:14:17 -04002114 "Allocation for Rx Queue %u failed\n", i);
Greg Rose5eae00c2013-12-21 06:12:45 +00002115 break;
2116 }
2117 return err;
2118}
2119
2120/**
2121 * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
2122 * @adapter: board private structure
2123 *
2124 * Free all receive software resources
2125 **/
Mitch Williamse284fc82015-03-27 00:12:09 -07002126void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
Greg Rose5eae00c2013-12-21 06:12:45 +00002127{
2128 int i;
2129
Mitch Williamsfdb47ae2015-11-19 11:34:18 -08002130 if (!adapter->rx_rings)
2131 return;
2132
Mitch Williamscc052922014-10-25 03:24:34 +00002133 for (i = 0; i < adapter->num_active_queues; i++)
Mitch Williams0dd438d2015-10-26 19:44:40 -04002134 if (adapter->rx_rings[i].desc)
2135 i40evf_free_rx_resources(&adapter->rx_rings[i]);
Greg Rose5eae00c2013-12-21 06:12:45 +00002136}
2137
2138/**
2139 * i40evf_open - Called when a network interface is made active
2140 * @netdev: network interface device structure
2141 *
2142 * Returns 0 on success, negative value on failure
2143 *
2144 * The open entry point is called when a network interface is made
2145 * active by the system (IFF_UP). At this point all resources needed
2146 * for transmit and receive operations are allocated, the interrupt
2147 * handler is registered with the OS, the watchdog timer is started,
2148 * and the stack is notified that the interface is ready.
2149 **/
2150static int i40evf_open(struct net_device *netdev)
2151{
2152 struct i40evf_adapter *adapter = netdev_priv(netdev);
2153 int err;
2154
Mitch Williamsef8693e2014-02-13 03:48:53 -08002155 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
2156 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
2157 return -EIO;
2158 }
Mitch Williams209dc4d2015-12-09 15:50:27 -08002159
2160 if (adapter->state != __I40EVF_DOWN)
Greg Rose5eae00c2013-12-21 06:12:45 +00002161 return -EBUSY;
2162
2163 /* allocate transmit descriptors */
2164 err = i40evf_setup_all_tx_resources(adapter);
2165 if (err)
2166 goto err_setup_tx;
2167
2168 /* allocate receive descriptors */
2169 err = i40evf_setup_all_rx_resources(adapter);
2170 if (err)
2171 goto err_setup_rx;
2172
2173 /* clear any pending interrupts, may auto mask */
2174 err = i40evf_request_traffic_irqs(adapter, netdev->name);
2175 if (err)
2176 goto err_req_irq;
2177
Mitch Williams44151cd2015-04-27 14:57:17 -04002178 i40evf_add_filter(adapter, adapter->hw.mac.addr);
Greg Rose5eae00c2013-12-21 06:12:45 +00002179 i40evf_configure(adapter);
2180
Bimmy Pujaricb130a02016-09-06 18:05:03 -07002181 i40evf_up_complete(adapter);
Greg Rose5eae00c2013-12-21 06:12:45 +00002182
2183 i40evf_irq_enable(adapter, true);
2184
2185 return 0;
2186
2187err_req_irq:
2188 i40evf_down(adapter);
2189 i40evf_free_traffic_irqs(adapter);
2190err_setup_rx:
2191 i40evf_free_all_rx_resources(adapter);
2192err_setup_tx:
2193 i40evf_free_all_tx_resources(adapter);
2194
2195 return err;
2196}
2197
2198/**
2199 * i40evf_close - Disables a network interface
2200 * @netdev: network interface device structure
2201 *
2202 * Returns 0, this is not allowed to fail
2203 *
2204 * The close entry point is called when an interface is de-activated
2205 * by the OS. The hardware is still under the drivers control, but
2206 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
2207 * are freed, along with all transmit and receive resources.
2208 **/
2209static int i40evf_close(struct net_device *netdev)
2210{
2211 struct i40evf_adapter *adapter = netdev_priv(netdev);
2212
Mitch Williams209dc4d2015-12-09 15:50:27 -08002213 if (adapter->state <= __I40EVF_DOWN_PENDING)
Mitch Williamsef8693e2014-02-13 03:48:53 -08002214 return 0;
2215
Mitch Williamsef8693e2014-02-13 03:48:53 -08002216
Greg Rose5eae00c2013-12-21 06:12:45 +00002217 set_bit(__I40E_DOWN, &adapter->vsi.state);
Mitch Williamsed0e8942017-01-24 10:23:59 -08002218 if (CLIENT_ENABLED(adapter))
2219 adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
Greg Rose5eae00c2013-12-21 06:12:45 +00002220
2221 i40evf_down(adapter);
Mitch Williams209dc4d2015-12-09 15:50:27 -08002222 adapter->state = __I40EVF_DOWN_PENDING;
Greg Rose5eae00c2013-12-21 06:12:45 +00002223 i40evf_free_traffic_irqs(adapter);
2224
Mitch Williams51f38262016-12-12 15:44:11 -08002225 /* We explicitly don't free resources here because the hardware is
2226 * still active and can DMA into memory. Resources are cleared in
2227 * i40evf_virtchnl_completion() after we get confirmation from the PF
2228 * driver that the rings have been stopped.
2229 */
Greg Rose5eae00c2013-12-21 06:12:45 +00002230 return 0;
2231}
2232
2233/**
2234 * i40evf_get_stats - Get System Network Statistics
2235 * @netdev: network interface device structure
2236 *
2237 * Returns the address of the device statistics structure.
2238 * The statistics are actually updated from the timer callback.
2239 **/
2240static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
2241{
2242 struct i40evf_adapter *adapter = netdev_priv(netdev);
2243
2244 /* only return the current stats */
2245 return &adapter->net_stats;
2246}
2247
2248/**
Greg Rose5eae00c2013-12-21 06:12:45 +00002249 * i40evf_change_mtu - Change the Maximum Transfer Unit
2250 * @netdev: network interface device structure
2251 * @new_mtu: new value for maximum frame size
2252 *
2253 * Returns 0 on success, negative on failure
2254 **/
2255static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
2256{
2257 struct i40evf_adapter *adapter = netdev_priv(netdev);
Greg Rose5eae00c2013-12-21 06:12:45 +00002258
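	/* Changing the MTU requires a reset; the reset task frees and
	 * rebuilds the rings so they match the new frame size.
	 */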
Greg Rose5eae00c2013-12-21 06:12:45 +00002259 netdev->mtu = new_mtu;
Mitch Williamsed0e8942017-01-24 10:23:59 -08002260 if (CLIENT_ENABLED(adapter)) {
2261 i40evf_notify_client_l2_params(&adapter->vsi);
2262 adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
2263 }
Mitch Williams67c818a2015-06-19 08:56:30 -07002264 adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
2265 schedule_work(&adapter->reset_task);
2266
Greg Rose5eae00c2013-12-21 06:12:45 +00002267 return 0;
2268}
2269
Alexander Duyck06fc0162016-10-25 16:08:47 -07002270/**
2271 * i40evf_features_check - Validate encapsulated packet conforms to limits
2272 * @skb: skb buff
2273 * @netdev: This physical port's netdev
2274 * @features: Offload features that the stack believes apply
2275 **/
2276static netdev_features_t i40evf_features_check(struct sk_buff *skb,
2277 struct net_device *dev,
2278 netdev_features_t features)
2279{
2280 size_t len;
2281
2282 /* No point in doing any of this if neither checksum nor GSO are
2283 * being requested for this frame. We can rule out both by just
2284 * checking for CHECKSUM_PARTIAL
2285 */
2286 if (skb->ip_summed != CHECKSUM_PARTIAL)
2287 return features;
2288
2289 /* We cannot support GSO if the MSS is going to be less than
2290 * 64 bytes. If it is then we need to drop support for GSO.
2291 */
2292 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
2293 features &= ~NETIF_F_GSO_MASK;
2294
2295 /* MACLEN can support at most 63 words */
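	/* len & ~(63 * 2) is nonzero if the L2 header is longer than
	 * 126 bytes or is not a whole number of 2-byte words; the same
	 * trick is used for the dword-counted lengths below.
	 */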
2296 len = skb_network_header(skb) - skb->data;
2297 if (len & ~(63 * 2))
2298 goto out_err;
2299
2300 /* IPLEN and EIPLEN can support at most 127 dwords */
2301 len = skb_transport_header(skb) - skb_network_header(skb);
2302 if (len & ~(127 * 4))
2303 goto out_err;
2304
2305 if (skb->encapsulation) {
2306 /* L4TUNLEN can support 127 words */
2307 len = skb_inner_network_header(skb) - skb_transport_header(skb);
2308 if (len & ~(127 * 2))
2309 goto out_err;
2310
2311 /* IPLEN can support at most 127 dwords */
2312 len = skb_inner_transport_header(skb) -
2313 skb_inner_network_header(skb);
2314 if (len & ~(127 * 4))
2315 goto out_err;
2316 }
2317
2318 /* No need to validate L4LEN as TCP is the only protocol with a
2319 	 * flexible value and we support all possible values supported
2320 * by TCP, which is at most 15 dwords
2321 */
2322
2323 return features;
2324out_err:
2325 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2326}
2327
Mitch Williamsc4445ae2016-03-18 12:18:07 -07002328#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
2329 NETIF_F_HW_VLAN_CTAG_RX |\
2330 NETIF_F_HW_VLAN_CTAG_FILTER)
2331
2332/**
2333 * i40evf_fix_features - fix up the netdev feature bits
2334 * @netdev: our net device
2335 * @features: desired feature bits
2336 *
2337 * Returns fixed-up features bits
2338 **/
2339static netdev_features_t i40evf_fix_features(struct net_device *netdev,
2340 netdev_features_t features)
2341{
2342 struct i40evf_adapter *adapter = netdev_priv(netdev);
2343
2344 features &= ~I40EVF_VLAN_FEATURES;
2345 if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
2346 features |= I40EVF_VLAN_FEATURES;
2347 return features;
2348}
2349
Greg Rose5eae00c2013-12-21 06:12:45 +00002350static const struct net_device_ops i40evf_netdev_ops = {
2351 .ndo_open = i40evf_open,
2352 .ndo_stop = i40evf_close,
2353 .ndo_start_xmit = i40evf_xmit_frame,
2354 .ndo_get_stats = i40evf_get_stats,
2355 .ndo_set_rx_mode = i40evf_set_rx_mode,
2356 .ndo_validate_addr = eth_validate_addr,
2357 .ndo_set_mac_address = i40evf_set_mac,
2358 .ndo_change_mtu = i40evf_change_mtu,
2359 .ndo_tx_timeout = i40evf_tx_timeout,
2360 .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
2361 .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
Alexander Duyck06fc0162016-10-25 16:08:47 -07002362 .ndo_features_check = i40evf_features_check,
Mitch Williamsc4445ae2016-03-18 12:18:07 -07002363 .ndo_fix_features = i40evf_fix_features,
Alexander Duyck7709b4c2015-09-24 09:04:38 -07002364#ifdef CONFIG_NET_POLL_CONTROLLER
2365 .ndo_poll_controller = i40evf_netpoll,
2366#endif
Greg Rose5eae00c2013-12-21 06:12:45 +00002367};
2368
2369/**
2370 * i40evf_check_reset_complete - check that VF reset is complete
2371 * @hw: pointer to hw struct
2372 *
2373 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
2374 **/
2375static int i40evf_check_reset_complete(struct i40e_hw *hw)
2376{
2377 u32 rstat;
2378 int i;
2379
2380 for (i = 0; i < 100; i++) {
Ashish Shahfd358862014-08-01 13:27:11 -07002381 rstat = rd32(hw, I40E_VFGEN_RSTAT) &
2382 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2383 if ((rstat == I40E_VFR_VFACTIVE) ||
2384 (rstat == I40E_VFR_COMPLETED))
Greg Rose5eae00c2013-12-21 06:12:45 +00002385 return 0;
Neerav Parikhf98a2002014-09-13 07:40:44 +00002386 usleep_range(10, 20);
Greg Rose5eae00c2013-12-21 06:12:45 +00002387 }
2388 return -EBUSY;
2389}
2390
2391/**
Mitch Williamse6d038d2015-06-04 16:23:58 -04002392 * i40evf_process_config - Process the config information we got from the PF
2393 * @adapter: board private structure
2394 *
2395 * Verify that we have a valid config struct, and set up our netdev features
2396 * and our VSI struct.
2397 **/
2398int i40evf_process_config(struct i40evf_adapter *adapter)
2399{
Mitch Williamsba6cc7f2016-04-01 13:34:31 -07002400 struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res;
Mitch Williamse6d038d2015-06-04 16:23:58 -04002401 struct net_device *netdev = adapter->netdev;
Mitch Williams43a3d9b2016-04-12 08:30:44 -07002402 struct i40e_vsi *vsi = &adapter->vsi;
Mitch Williamse6d038d2015-06-04 16:23:58 -04002403 int i;
Preethi Banalabacd75c2017-03-27 14:43:18 -07002404 netdev_features_t hw_enc_features;
2405 netdev_features_t hw_features;
Mitch Williamse6d038d2015-06-04 16:23:58 -04002406
2407 /* got VF config message back from PF, now we can parse it */
Mitch Williamsba6cc7f2016-04-01 13:34:31 -07002408 for (i = 0; i < vfres->num_vsis; i++) {
2409 if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
2410 adapter->vsi_res = &vfres->vsi_res[i];
Mitch Williamse6d038d2015-06-04 16:23:58 -04002411 }
2412 if (!adapter->vsi_res) {
2413 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
2414 return -ENODEV;
2415 }
2416
Preethi Banalabacd75c2017-03-27 14:43:18 -07002417 hw_enc_features = NETIF_F_SG |
2418 NETIF_F_IP_CSUM |
2419 NETIF_F_IPV6_CSUM |
2420 NETIF_F_HIGHDMA |
2421 NETIF_F_SOFT_FEATURES |
2422 NETIF_F_TSO |
2423 NETIF_F_TSO_ECN |
2424 NETIF_F_TSO6 |
2425 NETIF_F_SCTP_CRC |
2426 NETIF_F_RXHASH |
2427 NETIF_F_RXCSUM |
2428 0;
2429
2430 	/* advertise to the stack only if offloads for encapsulated packets
2431 	 * are supported
2432 */
2433 if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP) {
2434 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
Alexander Duyckb0fe3302016-04-02 00:05:14 -07002435 NETIF_F_GSO_GRE |
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002436 NETIF_F_GSO_GRE_CSUM |
Tom Herbert7e133182016-05-18 09:06:10 -07002437 NETIF_F_GSO_IPXIP4 |
Alexander Duyckbf2d1df2016-05-18 10:44:53 -07002438 NETIF_F_GSO_IPXIP6 |
Alexander Duyckb0fe3302016-04-02 00:05:14 -07002439 NETIF_F_GSO_UDP_TUNNEL_CSUM |
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002440 NETIF_F_GSO_PARTIAL |
Alexander Duyckb0fe3302016-04-02 00:05:14 -07002441 0;
Mitch Williamse6d038d2015-06-04 16:23:58 -04002442
Preethi Banalabacd75c2017-03-27 14:43:18 -07002443 if (!(vfres->vf_offload_flags &
2444 I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
2445 netdev->gso_partial_features |=
2446 NETIF_F_GSO_UDP_TUNNEL_CSUM;
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002447
Preethi Banalabacd75c2017-03-27 14:43:18 -07002448 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
2449 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
2450 netdev->hw_enc_features |= hw_enc_features;
2451 }
Alexander Duyckb0fe3302016-04-02 00:05:14 -07002452 /* record features VLANs can make use of */
Preethi Banalabacd75c2017-03-27 14:43:18 -07002453 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
Alexander Duyckf608e6a2016-01-24 21:17:57 -08002454
Alexander Duyckb0fe3302016-04-02 00:05:14 -07002455 /* Write features and hw_features separately to avoid polluting
Preethi Banalabacd75c2017-03-27 14:43:18 -07002456	 * them with, or dropping, features that are set when we registered.
Alexander Duyckb0fe3302016-04-02 00:05:14 -07002457 */
Preethi Banalabacd75c2017-03-27 14:43:18 -07002458 hw_features = hw_enc_features;
Mitch Williamsba6cc7f2016-04-01 13:34:31 -07002459
Preethi Banalabacd75c2017-03-27 14:43:18 -07002460 netdev->hw_features |= hw_features;
Alexander Duyckb0fe3302016-04-02 00:05:14 -07002461
Preethi Banalabacd75c2017-03-27 14:43:18 -07002462 netdev->features |= hw_features | I40EVF_VLAN_FEATURES;
Mitch Williamse6d038d2015-06-04 16:23:58 -04002463
2464 adapter->vsi.id = adapter->vsi_res->vsi_id;
2465
2466 adapter->vsi.back = adapter;
2467 adapter->vsi.base_vector = 1;
2468 adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
Mitch Williams43a3d9b2016-04-12 08:30:44 -07002469 vsi->netdev = adapter->netdev;
2470 vsi->qs_handle = adapter->vsi_res->qset_handle;
2471 if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2472 adapter->rss_key_size = vfres->rss_key_size;
2473 adapter->rss_lut_size = vfres->rss_lut_size;
2474 } else {
2475 adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
2476 adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
2477 }
2478
Mitch Williamse6d038d2015-06-04 16:23:58 -04002479 return 0;
2480}
2481
2482/**
Greg Rose5eae00c2013-12-21 06:12:45 +00002483 * i40evf_init_task - worker thread to perform delayed initialization
2484 * @work: pointer to work_struct containing our data
2485 *
2486 * This task completes the work that was begun in probe. Due to the nature
2487 * of VF-PF communications, we may need to wait tens of milliseconds to get
Joe Perchesdbedd442015-03-06 20:49:12 -08002488 * responses back from the PF. Rather than busy-wait in probe and bog down the
Greg Rose5eae00c2013-12-21 06:12:45 +00002489 * whole system, we'll do it in a task so we can sleep.
2490 * This task only runs during driver init. Once we've established
2491 * communications with the PF driver and set up our netdev, the watchdog
2492 * takes over.
2493 **/
2494static void i40evf_init_task(struct work_struct *work)
2495{
2496 struct i40evf_adapter *adapter = container_of(work,
2497 struct i40evf_adapter,
2498 init_task.work);
2499 struct net_device *netdev = adapter->netdev;
Greg Rose5eae00c2013-12-21 06:12:45 +00002500 struct i40e_hw *hw = &adapter->hw;
2501 struct pci_dev *pdev = adapter->pdev;
Mitch Williamse6d038d2015-06-04 16:23:58 -04002502 int err, bufsz;
Greg Rose5eae00c2013-12-21 06:12:45 +00002503
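	/* Init proceeds as a small state machine: __I40EVF_STARTUP brings up
	 * the admin queue, __I40EVF_INIT_VERSION_CHECK negotiates the
	 * virtchnl API version, __I40EVF_INIT_GET_RESOURCES fetches the VF
	 * config, and then we fall through to software init. Each state
	 * reschedules this task instead of blocking.
	 */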
2504 switch (adapter->state) {
2505 case __I40EVF_STARTUP:
2506 /* driver loaded, probe complete */
Mitch Williamsef8693e2014-02-13 03:48:53 -08002507 adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
2508 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
Greg Rose5eae00c2013-12-21 06:12:45 +00002509 err = i40e_set_mac_type(hw);
2510 if (err) {
Mitch Williamsc2a137c2014-02-20 19:29:09 -08002511 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
2512 err);
Mitch Williams2619ef42015-04-07 19:45:30 -04002513 goto err;
Greg Rose5eae00c2013-12-21 06:12:45 +00002514 }
2515 err = i40evf_check_reset_complete(hw);
2516 if (err) {
Mitch Williams0d9c7ea2014-04-23 04:50:00 +00002517 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
Mitch Williams75a64432014-11-11 20:02:42 +00002518 err);
Greg Rose5eae00c2013-12-21 06:12:45 +00002519 goto err;
2520 }
2521 hw->aq.num_arq_entries = I40EVF_AQ_LEN;
2522 hw->aq.num_asq_entries = I40EVF_AQ_LEN;
2523 hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
2524 hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
2525
2526 err = i40evf_init_adminq(hw);
2527 if (err) {
Mitch Williamsc2a137c2014-02-20 19:29:09 -08002528 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
2529 err);
Greg Rose5eae00c2013-12-21 06:12:45 +00002530 goto err;
2531 }
2532 err = i40evf_send_api_ver(adapter);
2533 if (err) {
Mitch Williams10bdd672014-03-06 08:59:53 +00002534 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
Greg Rose5eae00c2013-12-21 06:12:45 +00002535 i40evf_shutdown_adminq(hw);
2536 goto err;
2537 }
2538 adapter->state = __I40EVF_INIT_VERSION_CHECK;
2539 goto restart;
Greg Rose5eae00c2013-12-21 06:12:45 +00002540 case __I40EVF_INIT_VERSION_CHECK:
Mitch Williams10bdd672014-03-06 08:59:53 +00002541 if (!i40evf_asq_done(hw)) {
Mitch Williams80e72892014-05-10 04:49:06 +00002542 dev_err(&pdev->dev, "Admin queue command never completed\n");
Mitch Williams906a6932014-11-13 03:06:12 +00002543 i40evf_shutdown_adminq(hw);
2544 adapter->state = __I40EVF_STARTUP;
Greg Rose5eae00c2013-12-21 06:12:45 +00002545 goto err;
Mitch Williams10bdd672014-03-06 08:59:53 +00002546 }
Greg Rose5eae00c2013-12-21 06:12:45 +00002547
2548 /* aq msg sent, awaiting reply */
2549 err = i40evf_verify_api_ver(adapter);
2550 if (err) {
Mitch A Williamsd4f82fd2014-12-09 08:53:03 +00002551 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
Mitch Williams56f99202014-06-04 04:22:41 +00002552 err = i40evf_send_api_ver(adapter);
Mitch Williamsee1693e2015-06-04 16:23:59 -04002553 else
2554 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
2555 adapter->pf_version.major,
2556 adapter->pf_version.minor,
2557 I40E_VIRTCHNL_VERSION_MAJOR,
2558 I40E_VIRTCHNL_VERSION_MINOR);
Greg Rose5eae00c2013-12-21 06:12:45 +00002559 goto err;
2560 }
2561 err = i40evf_send_vf_config_msg(adapter);
2562 if (err) {
Mitch Williams3f2ab172014-06-04 04:22:40 +00002563 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
Greg Rose5eae00c2013-12-21 06:12:45 +00002564 err);
2565 goto err;
2566 }
2567 adapter->state = __I40EVF_INIT_GET_RESOURCES;
2568 goto restart;
Greg Rose5eae00c2013-12-21 06:12:45 +00002569 case __I40EVF_INIT_GET_RESOURCES:
2570 /* aq msg sent, awaiting reply */
2571 if (!adapter->vf_res) {
2572 bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
2573 (I40E_MAX_VF_VSI *
2574 sizeof(struct i40e_virtchnl_vsi_resource));
2575 adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
Mitch Williamsc2a137c2014-02-20 19:29:09 -08002576 if (!adapter->vf_res)
Greg Rose5eae00c2013-12-21 06:12:45 +00002577 goto err;
Greg Rose5eae00c2013-12-21 06:12:45 +00002578 }
2579 err = i40evf_get_vf_config(adapter);
Mitch Williams906a6932014-11-13 03:06:12 +00002580 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
Mitch Williams906a6932014-11-13 03:06:12 +00002581 err = i40evf_send_vf_config_msg(adapter);
2582 goto err;
Mitch Williamse7430722015-10-26 19:44:38 -04002583 } else if (err == I40E_ERR_PARAM) {
2584 /* We only get ERR_PARAM if the device is in a very bad
2585 * state or if we've been disabled for previous bad
2586 * behavior. Either way, we're done now.
2587 */
2588 i40evf_shutdown_adminq(hw);
2589 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
2590 return;
Mitch Williams906a6932014-11-13 03:06:12 +00002591 }
Greg Rose5eae00c2013-12-21 06:12:45 +00002592 if (err) {
Mitch Williamsc2a137c2014-02-20 19:29:09 -08002593 dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
2594 err);
Greg Rose5eae00c2013-12-21 06:12:45 +00002595 goto err_alloc;
2596 }
2597 adapter->state = __I40EVF_INIT_SW;
2598 break;
2599 default:
2600 goto err_alloc;
2601 }
Alexander Duyckf608e6a2016-01-24 21:17:57 -08002602
Mitch Williamse6d038d2015-06-04 16:23:58 -04002603 if (i40evf_process_config(adapter))
Greg Rose5eae00c2013-12-21 06:12:45 +00002604 goto err_alloc;
Mitch Williamse6d038d2015-06-04 16:23:58 -04002605 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
Greg Rose5eae00c2013-12-21 06:12:45 +00002606
2607 adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
2608
Greg Rose5eae00c2013-12-21 06:12:45 +00002609 netdev->netdev_ops = &i40evf_netdev_ops;
2610 i40evf_set_ethtool_ops(netdev);
2611 netdev->watchdog_timeo = 5 * HZ;
Greg Rose3415e8c2014-01-30 12:40:27 +00002612
Jarod Wilson91c527a2016-10-17 15:54:05 -04002613 /* MTU range: 68 - 9710 */
2614 netdev->min_mtu = ETH_MIN_MTU;
2615 netdev->max_mtu = I40E_MAX_RXBUFFER - (ETH_HLEN + ETH_FCS_LEN);
2616
Greg Rose5eae00c2013-12-21 06:12:45 +00002617 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
Mitch Williamsb34f90e2014-05-10 04:49:07 +00002618 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
Mitch Williamsc2a137c2014-02-20 19:29:09 -08002619 adapter->hw.mac.addr);
Mitch Williams14e52ee2015-08-31 19:54:44 -04002620 eth_hw_addr_random(netdev);
2621 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2622 } else {
2623 adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF;
2624 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
2625 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
Greg Rose5eae00c2013-12-21 06:12:45 +00002626 }
Greg Rose5eae00c2013-12-21 06:12:45 +00002627
Greg Rose5eae00c2013-12-21 06:12:45 +00002628 init_timer(&adapter->watchdog_timer);
2629 adapter->watchdog_timer.function = &i40evf_watchdog_timer;
2630 adapter->watchdog_timer.data = (unsigned long)adapter;
2631 mod_timer(&adapter->watchdog_timer, jiffies + 1);
2632
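	/* Use no more queue pairs than there are online CPUs; additional
	 * queues would only end up sharing the same CPUs.
	 */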
	adapter->num_active_queues = min_t(int,
					   adapter->vsi_res->num_queue_pairs,
					   (int)(num_online_cpus()));
	adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
	adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
	err = i40evf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;
	i40evf_map_rings_to_vectors(adapter);
	if (adapter->vf_res->vf_offload_flags &
	    I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;

	err = i40evf_request_misc_irq(adapter);
	if (err)
		goto err_sw_init;

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!adapter->netdev_registered) {
		err = register_netdev(netdev);
		if (err)
			goto err_register;
	}

	adapter->netdev_registered = true;

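	/* Keep the queues stopped until the interface is opened.  If the PF
	 * granted the client capability (assumed to be the iWARP/RDMA
	 * offload checked by CLIENT_ALLOWED()), register this VF with the
	 * client interface so a client driver can attach to it later.
	 */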
	netif_tx_stop_all_queues(netdev);
	if (CLIENT_ALLOWED(adapter)) {
		err = i40evf_lan_add_device(adapter);
		if (err)
			dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
				 err);
	}

	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
	if (netdev->features & NETIF_F_GRO)
		dev_info(&pdev->dev, "GRO is enabled\n");

	adapter->state = __I40EVF_DOWN;
	set_bit(__I40E_DOWN, &adapter->vsi.state);
	i40evf_misc_irq_enable(adapter);

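	/* Allocate the RSS key and lookup table using the sizes established
	 * during configuration.  If RSS can be programmed through the admin
	 * queue (RSS_AQ), defer it to the watchdog task via aq_required;
	 * otherwise configure it immediately with i40evf_init_rss().
	 */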
	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
	if (!adapter->rss_key || !adapter->rss_lut)
		goto err_mem;

	if (RSS_AQ(adapter)) {
		adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
		mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
	} else {
		i40evf_init_rss(adapter);
	}
	return;
restart:
	schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
	return;
err_mem:
	i40evf_free_rss(adapter);
err_register:
	i40evf_free_misc_irq(adapter);
err_sw_init:
	i40evf_reset_interrupt_capability(adapter);
err_alloc:
	kfree(adapter->vf_res);
	adapter->vf_res = NULL;
err:
	/* Things went into the weeds, so try again later */
	if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
		dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
		adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
		i40evf_shutdown_adminq(hw);
		adapter->state = __I40EVF_STARTUP;
		schedule_delayed_work(&adapter->init_task, HZ * 5);
		return;
	}
	schedule_delayed_work(&adapter->init_task, HZ);
}

/**
 * i40evf_shutdown - Shutdown the device in preparation for a reboot
 * @pdev: pci device structure
 **/
static void i40evf_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		i40evf_close(netdev);

	/* Prevent the watchdog from running. */
	adapter->state = __I40EVF_REMOVE;
	adapter->aq_required = 0;

#ifdef CONFIG_PM
	pci_save_state(pdev);

#endif
	pci_disable_device(pdev);
}

/**
 * i40evf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in i40evf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * i40evf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct i40evf_adapter *adapter = NULL;
	struct i40e_hw *hw = NULL;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

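	/* Prefer 64-bit DMA addressing; fall back to a 32-bit mask only if
	 * the platform cannot support the wider one.
	 */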
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	err = pci_request_regions(pdev, i40evf_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), MAX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;

	hw = &adapter->hw;
	hw->back = adapter;

	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
	adapter->state = __I40EVF_STARTUP;

	/* Call save state here because it relies on the adapter struct. */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	INIT_LIST_HEAD(&adapter->mac_filter_list);
	INIT_LIST_HEAD(&adapter->vlan_filter_list);

	INIT_WORK(&adapter->reset_task, i40evf_reset_task);
	INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
	INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
	INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task);
	INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
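	/* Stagger the first run of the init task by PCI function number,
	 * presumably so that multiple VFs probing at once do not hit the PF
	 * with simultaneous configuration requests.
	 */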
	schedule_delayed_work(&adapter->init_task,
			      msecs_to_jiffies(5 * (pdev->devfn & 0x07)));

	return 0;

err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

#ifdef CONFIG_PM
/**
 * i40evf_suspend - Power management suspend routine
 * @pdev: PCI device information struct
 * @state: unused
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		i40evf_down(adapter);
		rtnl_unlock();
	}
	i40evf_free_misc_irq(adapter);
	i40evf_reset_interrupt_capability(adapter);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	pci_disable_device(pdev);

	return 0;
}

/**
 * i40evf_resume - Power management resume routine
 * @pdev: PCI device information struct
 *
 * Called when the system (VM) is resumed from sleep/suspend.
 **/
static int i40evf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
		return err;
	}
	pci_set_master(pdev);

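	/* Rebuild the interrupt scheme under the RTNL lock, presumably to
	 * serialize against a concurrent open/close of the netdev.
	 */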
	rtnl_lock();
	err = i40evf_set_interrupt_capability(adapter);
	if (err) {
		rtnl_unlock();
		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
		return err;
	}
	err = i40evf_request_misc_irq(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
		return err;
	}

	schedule_work(&adapter->reset_task);

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
/**
 * i40evf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * i40evf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40evf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40evf_mac_filter *f, *ftmp;
	struct i40e_hw *hw = &adapter->hw;
	int err;

	cancel_delayed_work_sync(&adapter->init_task);
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->client_task);
	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}
	if (CLIENT_ALLOWED(adapter)) {
		err = i40evf_lan_del_device(adapter);
		if (err)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 err);
	}

	/* Shut down all the garbage mashers on the detention level */
	adapter->state = __I40EVF_REMOVE;
	adapter->aq_required = 0;
	i40evf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
	if (!i40evf_asq_done(hw)) {
		i40evf_request_reset(adapter);
		msleep(50);
	}
	i40evf_free_all_tx_resources(adapter);
	i40evf_free_all_rx_resources(adapter);
	i40evf_misc_irq_disable(adapter);
	i40evf_free_misc_irq(adapter);
	i40evf_reset_interrupt_capability(adapter);
	i40evf_free_q_vectors(adapter);

	if (adapter->watchdog_timer.function)
		del_timer_sync(&adapter->watchdog_timer);

	flush_scheduled_work();

	i40evf_free_rss(adapter);

	if (hw->aq.asq.count)
		i40evf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	iounmap(hw->hw_addr);
	pci_release_regions(pdev);
	i40evf_free_all_tx_resources(adapter);
	i40evf_free_all_rx_resources(adapter);
	i40evf_free_queues(adapter);
	kfree(adapter->vf_res);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}
	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

static struct pci_driver i40evf_driver = {
	.name     = i40evf_driver_name,
	.id_table = i40evf_pci_tbl,
	.probe    = i40evf_probe,
	.remove   = i40evf_remove,
#ifdef CONFIG_PM
	.suspend  = i40evf_suspend,
	.resume   = i40evf_resume,
#endif
	.shutdown = i40evf_shutdown,
};

/**
 * i40evf_init_module - Driver Registration Routine
 *
 * i40evf_init_module is the first routine called when the driver is
 * loaded. It creates the driver workqueue and registers with the PCI
 * subsystem.
 **/
static int __init i40evf_init_module(void)
{
	int ret;

	pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
		i40evf_driver_version);

	pr_info("%s\n", i40evf_copyright);

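	/* Use a driver-private workqueue rather than the system workqueue;
	 * WQ_MEM_RECLAIM provides a rescuer thread so queued work can still
	 * make forward progress under memory pressure.
	 */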
	i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
				    i40evf_driver_name);
	if (!i40evf_wq) {
		pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
		return -ENOMEM;
	}
	ret = pci_register_driver(&i40evf_driver);
	if (ret)
		destroy_workqueue(i40evf_wq);
	return ret;
}

module_init(i40evf_init_module);

/**
 * i40evf_exit_module - Driver Exit Cleanup Routine
 *
 * i40evf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40evf_exit_module(void)
{
	pci_unregister_driver(&i40evf_driver);
	destroy_workqueue(i40evf_wq);
}

module_exit(i40evf_exit_module);

/* i40evf_main.c */