blob: 25db8d80b6841341a7de8b84d13622d0cd68fdad [file] [log] [blame]
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
Jesse Brandeburg2818ccd2016-01-13 16:51:38 -08004 * Copyright(c) 2013 - 2016 Intel Corporation.
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
Greg Rosedc641b72013-12-18 13:45:51 +000015 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000017 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 ******************************************************************************/
26
Sowmini Varadhanb499ffb2015-12-07 15:06:34 -050027#include <linux/etherdevice.h>
28#include <linux/of_net.h>
29#include <linux/pci.h>
30
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000031/* Local includes */
32#include "i40e.h"
Shannon Nelson4eb3f762014-03-06 08:59:58 +000033#include "i40e_diag.h"
Alexander Duyck06a5f7f2016-06-16 12:22:06 -070034#include <net/udp_tunnel.h>
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000035
/* Driver identification exported to ethtool/modinfo; the version string is
 * assembled at preprocessing time from the MAJOR/MINOR/BUILD components.
 */
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

/* "-k" suffix marks the in-kernel (upstream) build of the driver */
#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
#define DRV_VERSION_BUILD 21
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000050
/* a bit of forward declarations -- setup/teardown helpers defined later in
 * this file that are needed by earlier code paths
 */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000062
63/* i40e_pci_tbl - PCI Device ID Table
64 *
65 * Last entry must be all 0s
66 *
67 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
68 * Class, Class Mask, private data (not used) }
69 */
Benoit Taine9baa3c32014-08-08 15:56:03 +020070static const struct pci_device_id i40e_pci_tbl[] = {
Shannon Nelsonab600852014-01-17 15:36:39 -080071 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
Shannon Nelsonab600852014-01-17 15:36:39 -080072 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
Shannon Nelsonab600852014-01-17 15:36:39 -080073 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
Shannon Nelsonab600852014-01-17 15:36:39 -080075 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
77 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
Mitch Williams5960d332014-09-13 07:40:47 +000078 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
Shannon Nelsonbc5166b92015-08-26 15:14:10 -040079 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
Jesse Brandeburgae24b402015-03-27 00:12:09 -070080 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
Anjali Singhai Jain35dae512015-12-22 14:25:03 -080081 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
82 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
Anjali Singhai Jain87e6c1d2015-06-05 12:20:25 -040083 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
84 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
85 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
Catherine Sullivand6bf58c2016-03-18 12:18:08 -070086 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
Shannon Nelson48a3b512015-07-23 16:54:39 -040087 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
88 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000089 /* required last entry */
90 {0, }
91};
92MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
93
#define I40E_MAX_VF_COUNT 128

/* debug level / debug mask module parameter; -1 selects the default.
 * NOTE(review): 'debug' is declared as a signed int but registered via
 * module_param(debug, uint, 0) -- confirm the signed/unsigned mismatch
 * is intentional (the -1 default relies on wrap-around when read as uint).
 */
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* driver-private workqueue on which the PF service task is queued */
static struct workqueue_struct *i40e_wq;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000106/**
107 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
108 * @hw: pointer to the HW structure
109 * @mem: ptr to mem struct to fill out
110 * @size: size of memory requested
111 * @alignment: what to align the allocation to
112 **/
113int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
114 u64 size, u32 alignment)
115{
116 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
117
118 mem->size = ALIGN(size, alignment);
119 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
120 &mem->pa, GFP_KERNEL);
Jesse Brandeburg93bc73b2013-09-13 08:23:18 +0000121 if (!mem->va)
122 return -ENOMEM;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000123
Jesse Brandeburg93bc73b2013-09-13 08:23:18 +0000124 return 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000125}
126
127/**
128 * i40e_free_dma_mem_d - OS specific memory free for shared code
129 * @hw: pointer to the HW structure
130 * @mem: ptr to mem struct to free
131 **/
132int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
133{
134 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
135
136 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
137 mem->va = NULL;
138 mem->pa = 0;
139 mem->size = 0;
140
141 return 0;
142}
143
144/**
145 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
146 * @hw: pointer to the HW structure
147 * @mem: ptr to mem struct to fill out
148 * @size: size of memory requested
149 **/
150int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
151 u32 size)
152{
153 mem->size = size;
154 mem->va = kzalloc(size, GFP_KERNEL);
155
Jesse Brandeburg93bc73b2013-09-13 08:23:18 +0000156 if (!mem->va)
157 return -ENOMEM;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000158
Jesse Brandeburg93bc73b2013-09-13 08:23:18 +0000159 return 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000160}
161
162/**
163 * i40e_free_virt_mem_d - OS specific memory free for shared code
164 * @hw: pointer to the HW structure
165 * @mem: ptr to mem struct to free
166 **/
167int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
168{
169 /* it's ok to kfree a NULL pointer */
170 kfree(mem->va);
171 mem->va = NULL;
172 mem->size = 0;
173
174 return 0;
175}
176
177/**
178 * i40e_get_lump - find a lump of free generic resource
179 * @pf: board private structure
180 * @pile: the pile of resource to search
181 * @needed: the number of items needed
182 * @id: an owner id to stick on the items assigned
183 *
184 * Returns the base item index of the lump, or negative for error
185 *
186 * The search_hint trick and lack of advanced fit-finding only work
187 * because we're highly likely to have all the same size lump requests.
188 * Linear search time and any fragmentation should be minimal.
189 **/
190static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
191 u16 needed, u16 id)
192{
193 int ret = -ENOMEM;
Jesse Brandeburgddf434a2013-09-13 08:23:19 +0000194 int i, j;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000195
196 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
197 dev_info(&pf->pdev->dev,
198 "param err: pile=%p needed=%d id=0x%04x\n",
199 pile, needed, id);
200 return -EINVAL;
201 }
202
203 /* start the linear search with an imperfect hint */
204 i = pile->search_hint;
Jesse Brandeburgddf434a2013-09-13 08:23:19 +0000205 while (i < pile->num_entries) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000206 /* skip already allocated entries */
207 if (pile->list[i] & I40E_PILE_VALID_BIT) {
208 i++;
209 continue;
210 }
211
212 /* do we have enough in this lump? */
213 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
214 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
215 break;
216 }
217
218 if (j == needed) {
219 /* there was enough, so assign it to the requestor */
220 for (j = 0; j < needed; j++)
221 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
222 ret = i;
223 pile->search_hint = i + j;
Jesse Brandeburgddf434a2013-09-13 08:23:19 +0000224 break;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000225 }
Jesse Brandeburg6995b362015-08-28 17:55:54 -0400226
227 /* not enough, so skip over it and continue looking */
228 i += j;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000229 }
230
231 return ret;
232}
233
234/**
235 * i40e_put_lump - return a lump of generic resource
236 * @pile: the pile of resource to search
237 * @index: the base item index
238 * @id: the owner id of the items assigned
239 *
240 * Returns the count of items in the lump
241 **/
242static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
243{
244 int valid_id = (id | I40E_PILE_VALID_BIT);
245 int count = 0;
246 int i;
247
248 if (!pile || index >= pile->num_entries)
249 return -EINVAL;
250
251 for (i = index;
252 i < pile->num_entries && pile->list[i] == valid_id;
253 i++) {
254 pile->list[i] = 0;
255 count++;
256 }
257
258 if (count && index < pile->search_hint)
259 pile->search_hint = index;
260
261 return count;
262}
263
264/**
Anjali Singhai Jainfdf0e0b2015-03-31 00:45:05 -0700265 * i40e_find_vsi_from_id - searches for the vsi with the given id
266 * @pf - the pf structure to search for the vsi
267 * @id - id of the vsi it is searching for
268 **/
269struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
270{
271 int i;
272
273 for (i = 0; i < pf->num_alloc_vsi; i++)
274 if (pf->vsi[i] && (pf->vsi[i]->id == id))
275 return pf->vsi[i];
276
277 return NULL;
278}
279
280/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000281 * i40e_service_event_schedule - Schedule the service task to wake up
282 * @pf: board private structure
283 *
284 * If not already scheduled, this puts the task into the work queue
285 **/
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -0600286void i40e_service_event_schedule(struct i40e_pf *pf)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000287{
288 if (!test_bit(__I40E_DOWN, &pf->state) &&
289 !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
290 !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
Jesse Brandeburg2803b162015-12-22 14:25:08 -0800291 queue_work(i40e_wq, &pf->service_task);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000292}
293
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		/* no stopped queue matched; fall through and escalate anyway */
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	/* restart the escalation ladder if the last recovery was long ago;
	 * bail out if we are still within one watchdog period of it
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	/* escalate: PF reset -> core reset -> global reset, then give up */
	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	/* service task performs the actual reset */
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
391
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
403
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: caller-supplied struct to accumulate per-queue counters into
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 *
 * Per-queue packet/byte counts are summed here under RCU using the
 * u64_stats seqcount retry loops; the remaining fields are copied from
 * the VSI totals maintained by the watchdog subtask.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	/* nothing meaningful to report while the VSI is down or half-built */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		/* retry loop guards against a concurrent writer update */
		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;

		/* the paired rx ring sits immediately after the tx ring in
		 * the same allocation -- presumed from ring alloc layout;
		 * TODO(review): confirm against i40e_alloc_rings()
		 */
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_dropped = vsi_stats->rx_dropped;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}
474
475/**
476 * i40e_vsi_reset_stats - Resets all stats of the given vsi
477 * @vsi: the VSI to have its stats reset
478 **/
479void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
480{
481 struct rtnl_link_stats64 *ns;
482 int i;
483
484 if (!vsi)
485 return;
486
487 ns = i40e_get_vsi_stats_struct(vsi);
488 memset(ns, 0, sizeof(*ns));
489 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
490 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
491 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
Greg Rose8e9dca52013-12-18 13:45:53 +0000492 if (vsi->rx_rings && vsi->rx_rings[0]) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000493 for (i = 0; i < vsi->num_queue_pairs; i++) {
Jesse Brandeburg6995b362015-08-28 17:55:54 -0400494 memset(&vsi->rx_rings[i]->stats, 0,
Alexander Duyck9f65e152013-09-28 06:00:58 +0000495 sizeof(vsi->rx_rings[i]->stats));
Jesse Brandeburg6995b362015-08-28 17:55:54 -0400496 memset(&vsi->rx_rings[i]->rx_stats, 0,
Alexander Duyck9f65e152013-09-28 06:00:58 +0000497 sizeof(vsi->rx_rings[i]->rx_stats));
Jesse Brandeburg6995b362015-08-28 17:55:54 -0400498 memset(&vsi->tx_rings[i]->stats, 0,
Alexander Duyck9f65e152013-09-28 06:00:58 +0000499 sizeof(vsi->tx_rings[i]->stats));
500 memset(&vsi->tx_rings[i]->tx_stats, 0,
501 sizeof(vsi->tx_rings[i]->tx_stats));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000502 }
Greg Rose8e9dca52013-12-18 13:45:53 +0000503 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000504 vsi->stat_offsets_loaded = false;
505}
506
507/**
Jeff Kirsherb40c82e62015-02-27 09:18:34 +0000508 * i40e_pf_reset_stats - Reset all of the stats for the given PF
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000509 * @pf: the PF to be reset
510 **/
511void i40e_pf_reset_stats(struct i40e_pf *pf)
512{
Shannon Nelsone91fdf72014-06-03 23:50:18 +0000513 int i;
514
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000515 memset(&pf->stats, 0, sizeof(pf->stats));
516 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
517 pf->stat_offsets_loaded = false;
Shannon Nelsone91fdf72014-06-03 23:50:18 +0000518
519 for (i = 0; i < I40E_MAX_VEB; i++) {
520 if (pf->veb[i]) {
521 memset(&pf->veb[i]->stats, 0,
522 sizeof(pf->veb[i]->stats));
523 memset(&pf->veb[i]->stats_offsets, 0,
524 sizeof(pf->veb[i]->stats_offsets));
525 pf->veb[i]->stat_offsets_loaded = false;
526 }
527 }
Catherine Sullivan42bce042016-07-27 12:02:32 -0700528 pf->hw_csum_rx_error = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000529}
530
531/**
532 * i40e_stat_update48 - read and update a 48 bit stat from the chip
533 * @hw: ptr to the hardware info
534 * @hireg: the high 32 bit reg to read
535 * @loreg: the low 32 bit reg to read
536 * @offset_loaded: has the initial offset been loaded yet
537 * @offset: ptr to current offset value
538 * @stat: ptr to the stat
539 *
540 * Since the device stats are not reset at PFReset, they likely will not
541 * be zeroed when the driver starts. We'll save the first values read
542 * and use them as offsets to be subtracted from the raw values in order
543 * to report stats that count from zero. In the process, we also manage
544 * the potential roll-over.
545 **/
546static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
547 bool offset_loaded, u64 *offset, u64 *stat)
548{
549 u64 new_data;
550
Shannon Nelsonab600852014-01-17 15:36:39 -0800551 if (hw->device_id == I40E_DEV_ID_QEMU) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000552 new_data = rd32(hw, loreg);
553 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
554 } else {
555 new_data = rd64(hw, loreg);
556 }
557 if (!offset_loaded)
558 *offset = new_data;
559 if (likely(new_data >= *offset))
560 *stat = new_data - *offset;
561 else
Jesse Brandeburg41a1d042015-06-04 16:24:02 -0400562 *stat = (new_data + BIT_ULL(48)) - *offset;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000563 *stat &= 0xFFFFFFFFFFFFULL;
564}
565
566/**
567 * i40e_stat_update32 - read and update a 32 bit stat from the chip
568 * @hw: ptr to the hardware info
569 * @reg: the hw reg to read
570 * @offset_loaded: has the initial offset been loaded yet
571 * @offset: ptr to current offset value
572 * @stat: ptr to the stat
573 **/
574static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
575 bool offset_loaded, u64 *offset, u64 *stat)
576{
577 u32 new_data;
578
579 new_data = rd32(hw, reg);
580 if (!offset_loaded)
581 *offset = new_data;
582 if (likely(new_data >= *offset))
583 *stat = (u32)(new_data - *offset);
584 else
Jesse Brandeburg41a1d042015-06-04 16:24:02 -0400585 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000586}
587
588/**
589 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
590 * @vsi: the VSI to be updated
591 **/
592void i40e_update_eth_stats(struct i40e_vsi *vsi)
593{
594 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
595 struct i40e_pf *pf = vsi->back;
596 struct i40e_hw *hw = &pf->hw;
597 struct i40e_eth_stats *oes;
598 struct i40e_eth_stats *es; /* device's eth stats */
599
600 es = &vsi->eth_stats;
601 oes = &vsi->eth_stats_offsets;
602
603 /* Gather up the stats that the hw collects */
604 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
605 vsi->stat_offsets_loaded,
606 &oes->tx_errors, &es->tx_errors);
607 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
608 vsi->stat_offsets_loaded,
609 &oes->rx_discards, &es->rx_discards);
Shannon Nelson41a9e552014-04-23 04:50:20 +0000610 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
611 vsi->stat_offsets_loaded,
612 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
613 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
614 vsi->stat_offsets_loaded,
615 &oes->tx_errors, &es->tx_errors);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000616
617 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
618 I40E_GLV_GORCL(stat_idx),
619 vsi->stat_offsets_loaded,
620 &oes->rx_bytes, &es->rx_bytes);
621 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
622 I40E_GLV_UPRCL(stat_idx),
623 vsi->stat_offsets_loaded,
624 &oes->rx_unicast, &es->rx_unicast);
625 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
626 I40E_GLV_MPRCL(stat_idx),
627 vsi->stat_offsets_loaded,
628 &oes->rx_multicast, &es->rx_multicast);
629 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
630 I40E_GLV_BPRCL(stat_idx),
631 vsi->stat_offsets_loaded,
632 &oes->rx_broadcast, &es->rx_broadcast);
633
634 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
635 I40E_GLV_GOTCL(stat_idx),
636 vsi->stat_offsets_loaded,
637 &oes->tx_bytes, &es->tx_bytes);
638 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
639 I40E_GLV_UPTCL(stat_idx),
640 vsi->stat_offsets_loaded,
641 &oes->tx_unicast, &es->tx_unicast);
642 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
643 I40E_GLV_MPTCL(stat_idx),
644 vsi->stat_offsets_loaded,
645 &oes->tx_multicast, &es->tx_multicast);
646 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
647 I40E_GLV_BPTCL(stat_idx),
648 vsi->stat_offsets_loaded,
649 &oes->tx_broadcast, &es->tx_broadcast);
650 vsi->stat_offsets_loaded = true;
651}
652
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 *
 * Reads the per-VEB switch counters (indexed by veb->stats_idx) and the
 * per-traffic-class counters into veb->stats / veb->tc_stats using the
 * offset-on-first-read scheme of i40e_stat_update32/48.
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	/* RUPP counter only exists on revision B0 and later silicon */
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	/* per-traffic-class rx/tx packet and byte counters */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}
731
#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 *
 * No-op for non-FCoE VSIs. The FCoE stat block is indexed by PF id plus
 * a fixed offset, and uses the same offset-on-first-read scheme as the
 * other stat updaters.
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;     /* device's eth stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000781/**
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000782 * i40e_update_vsi_stats - Update the vsi statistics counters.
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000783 * @vsi: the VSI to be updated
784 *
785 * There are a few instances where we store the same stat in a
786 * couple of different structs. This is partly because we have
787 * the netdev stats that need to be filled out, which is slightly
788 * different from the "eth_stats" defined by the chip and used in
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000789 * VF communications. We sort it out here.
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000790 **/
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000791static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000792{
793 struct i40e_pf *pf = vsi->back;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000794 struct rtnl_link_stats64 *ons;
795 struct rtnl_link_stats64 *ns; /* netdev stats */
796 struct i40e_eth_stats *oes;
797 struct i40e_eth_stats *es; /* device's eth stats */
798 u32 tx_restart, tx_busy;
Anjali Singhai Jaindd353102016-01-15 14:33:12 -0800799 u64 tx_lost_interrupt;
Akeem G Abodunrinbf00b372014-10-17 03:14:39 +0000800 struct i40e_ring *p;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000801 u32 rx_page, rx_buf;
Akeem G Abodunrinbf00b372014-10-17 03:14:39 +0000802 u64 bytes, packets;
803 unsigned int start;
Anjali Singhai Jain2fc3d712015-08-27 11:42:29 -0400804 u64 tx_linearize;
Anjali Singhai Jain164c9f52015-10-21 19:47:08 -0400805 u64 tx_force_wb;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000806 u64 rx_p, rx_b;
807 u64 tx_p, tx_b;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000808 u16 q;
809
810 if (test_bit(__I40E_DOWN, &vsi->state) ||
811 test_bit(__I40E_CONFIG_BUSY, &pf->state))
812 return;
813
814 ns = i40e_get_vsi_stats_struct(vsi);
815 ons = &vsi->net_stats_offsets;
816 es = &vsi->eth_stats;
817 oes = &vsi->eth_stats_offsets;
818
819 /* Gather up the netdev and vsi stats that the driver collects
820 * on the fly during packet processing
821 */
822 rx_b = rx_p = 0;
823 tx_b = tx_p = 0;
Anjali Singhai Jain164c9f52015-10-21 19:47:08 -0400824 tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
Anjali Singhai Jaindd353102016-01-15 14:33:12 -0800825 tx_lost_interrupt = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000826 rx_page = 0;
827 rx_buf = 0;
Alexander Duyck980e9b12013-09-28 06:01:03 +0000828 rcu_read_lock();
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000829 for (q = 0; q < vsi->num_queue_pairs; q++) {
Alexander Duyck980e9b12013-09-28 06:01:03 +0000830 /* locate Tx ring */
831 p = ACCESS_ONCE(vsi->tx_rings[q]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000832
Alexander Duyck980e9b12013-09-28 06:01:03 +0000833 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700834 start = u64_stats_fetch_begin_irq(&p->syncp);
Alexander Duyck980e9b12013-09-28 06:01:03 +0000835 packets = p->stats.packets;
836 bytes = p->stats.bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700837 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
Alexander Duyck980e9b12013-09-28 06:01:03 +0000838 tx_b += bytes;
839 tx_p += packets;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000840 tx_restart += p->tx_stats.restart_queue;
841 tx_busy += p->tx_stats.tx_busy;
Anjali Singhai Jain2fc3d712015-08-27 11:42:29 -0400842 tx_linearize += p->tx_stats.tx_linearize;
Anjali Singhai Jain164c9f52015-10-21 19:47:08 -0400843 tx_force_wb += p->tx_stats.tx_force_wb;
Anjali Singhai Jaindd353102016-01-15 14:33:12 -0800844 tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;
Alexander Duyck980e9b12013-09-28 06:01:03 +0000845
846 /* Rx queue is part of the same block as Tx queue */
847 p = &p[1];
848 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700849 start = u64_stats_fetch_begin_irq(&p->syncp);
Alexander Duyck980e9b12013-09-28 06:01:03 +0000850 packets = p->stats.packets;
851 bytes = p->stats.bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700852 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
Alexander Duyck980e9b12013-09-28 06:01:03 +0000853 rx_b += bytes;
854 rx_p += packets;
Mitch Williams420136c2013-12-18 13:45:59 +0000855 rx_buf += p->rx_stats.alloc_buff_failed;
856 rx_page += p->rx_stats.alloc_page_failed;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000857 }
Alexander Duyck980e9b12013-09-28 06:01:03 +0000858 rcu_read_unlock();
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000859 vsi->tx_restart = tx_restart;
860 vsi->tx_busy = tx_busy;
Anjali Singhai Jain2fc3d712015-08-27 11:42:29 -0400861 vsi->tx_linearize = tx_linearize;
Anjali Singhai Jain164c9f52015-10-21 19:47:08 -0400862 vsi->tx_force_wb = tx_force_wb;
Anjali Singhai Jaindd353102016-01-15 14:33:12 -0800863 vsi->tx_lost_interrupt = tx_lost_interrupt;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000864 vsi->rx_page_failed = rx_page;
865 vsi->rx_buf_failed = rx_buf;
866
867 ns->rx_packets = rx_p;
868 ns->rx_bytes = rx_b;
869 ns->tx_packets = tx_p;
870 ns->tx_bytes = tx_b;
871
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000872 /* update netdev stats from eth stats */
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000873 i40e_update_eth_stats(vsi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000874 ons->tx_errors = oes->tx_errors;
875 ns->tx_errors = es->tx_errors;
876 ons->multicast = oes->rx_multicast;
877 ns->multicast = es->rx_multicast;
Shannon Nelson41a9e552014-04-23 04:50:20 +0000878 ons->rx_dropped = oes->rx_discards;
879 ns->rx_dropped = es->rx_discards;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000880 ons->tx_dropped = oes->tx_discards;
881 ns->tx_dropped = es->tx_discards;
882
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000883 /* pull in a couple PF stats if this is the main vsi */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000884 if (vsi == pf->vsi[pf->lan_vsi]) {
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000885 ns->rx_crc_errors = pf->stats.crc_errors;
886 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
887 ns->rx_length_errors = pf->stats.rx_length_errors;
888 }
889}
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000890
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000891/**
Jeff Kirsherb40c82e62015-02-27 09:18:34 +0000892 * i40e_update_pf_stats - Update the PF statistics counters.
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000893 * @pf: the PF to be updated
894 **/
895static void i40e_update_pf_stats(struct i40e_pf *pf)
896{
897 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
898 struct i40e_hw_port_stats *nsd = &pf->stats;
899 struct i40e_hw *hw = &pf->hw;
900 u32 val;
901 int i;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000902
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000903 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
904 I40E_GLPRT_GORCL(hw->port),
905 pf->stat_offsets_loaded,
906 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
907 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
908 I40E_GLPRT_GOTCL(hw->port),
909 pf->stat_offsets_loaded,
910 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
911 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
912 pf->stat_offsets_loaded,
913 &osd->eth.rx_discards,
914 &nsd->eth.rx_discards);
Shannon Nelson532d2832014-04-23 04:50:09 +0000915 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
916 I40E_GLPRT_UPRCL(hw->port),
917 pf->stat_offsets_loaded,
918 &osd->eth.rx_unicast,
919 &nsd->eth.rx_unicast);
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000920 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
921 I40E_GLPRT_MPRCL(hw->port),
922 pf->stat_offsets_loaded,
923 &osd->eth.rx_multicast,
924 &nsd->eth.rx_multicast);
Shannon Nelson532d2832014-04-23 04:50:09 +0000925 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
926 I40E_GLPRT_BPRCL(hw->port),
927 pf->stat_offsets_loaded,
928 &osd->eth.rx_broadcast,
929 &nsd->eth.rx_broadcast);
930 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
931 I40E_GLPRT_UPTCL(hw->port),
932 pf->stat_offsets_loaded,
933 &osd->eth.tx_unicast,
934 &nsd->eth.tx_unicast);
935 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
936 I40E_GLPRT_MPTCL(hw->port),
937 pf->stat_offsets_loaded,
938 &osd->eth.tx_multicast,
939 &nsd->eth.tx_multicast);
940 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
941 I40E_GLPRT_BPTCL(hw->port),
942 pf->stat_offsets_loaded,
943 &osd->eth.tx_broadcast,
944 &nsd->eth.tx_broadcast);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000945
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000946 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
947 pf->stat_offsets_loaded,
948 &osd->tx_dropped_link_down,
949 &nsd->tx_dropped_link_down);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000950
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000951 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
952 pf->stat_offsets_loaded,
953 &osd->crc_errors, &nsd->crc_errors);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000954
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000955 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
956 pf->stat_offsets_loaded,
957 &osd->illegal_bytes, &nsd->illegal_bytes);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000958
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000959 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
960 pf->stat_offsets_loaded,
961 &osd->mac_local_faults,
962 &nsd->mac_local_faults);
963 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
964 pf->stat_offsets_loaded,
965 &osd->mac_remote_faults,
966 &nsd->mac_remote_faults);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000967
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000968 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
969 pf->stat_offsets_loaded,
970 &osd->rx_length_errors,
971 &nsd->rx_length_errors);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000972
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000973 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
974 pf->stat_offsets_loaded,
975 &osd->link_xon_rx, &nsd->link_xon_rx);
976 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
977 pf->stat_offsets_loaded,
978 &osd->link_xon_tx, &nsd->link_xon_tx);
Neerav Parikh95db2392015-11-06 15:26:09 -0800979 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
980 pf->stat_offsets_loaded,
981 &osd->link_xoff_rx, &nsd->link_xoff_rx);
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000982 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
983 pf->stat_offsets_loaded,
984 &osd->link_xoff_tx, &nsd->link_xoff_tx);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000985
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000986 for (i = 0; i < 8; i++) {
Neerav Parikh95db2392015-11-06 15:26:09 -0800987 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
988 pf->stat_offsets_loaded,
989 &osd->priority_xoff_rx[i],
990 &nsd->priority_xoff_rx[i]);
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000991 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000992 pf->stat_offsets_loaded,
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000993 &osd->priority_xon_rx[i],
994 &nsd->priority_xon_rx[i]);
995 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
Jesse Brandeburg41c445f2013-09-11 08:39:46 +0000996 pf->stat_offsets_loaded,
Shannon Nelson7812fdd2014-04-23 04:50:18 +0000997 &osd->priority_xon_tx[i],
998 &nsd->priority_xon_tx[i]);
999 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001000 pf->stat_offsets_loaded,
Shannon Nelson7812fdd2014-04-23 04:50:18 +00001001 &osd->priority_xoff_tx[i],
1002 &nsd->priority_xoff_tx[i]);
1003 i40e_stat_update32(hw,
1004 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001005 pf->stat_offsets_loaded,
Shannon Nelson7812fdd2014-04-23 04:50:18 +00001006 &osd->priority_xon_2_xoff[i],
1007 &nsd->priority_xon_2_xoff[i]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001008 }
1009
Shannon Nelson7812fdd2014-04-23 04:50:18 +00001010 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
1011 I40E_GLPRT_PRC64L(hw->port),
1012 pf->stat_offsets_loaded,
1013 &osd->rx_size_64, &nsd->rx_size_64);
1014 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
1015 I40E_GLPRT_PRC127L(hw->port),
1016 pf->stat_offsets_loaded,
1017 &osd->rx_size_127, &nsd->rx_size_127);
1018 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1019 I40E_GLPRT_PRC255L(hw->port),
1020 pf->stat_offsets_loaded,
1021 &osd->rx_size_255, &nsd->rx_size_255);
1022 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1023 I40E_GLPRT_PRC511L(hw->port),
1024 pf->stat_offsets_loaded,
1025 &osd->rx_size_511, &nsd->rx_size_511);
1026 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1027 I40E_GLPRT_PRC1023L(hw->port),
1028 pf->stat_offsets_loaded,
1029 &osd->rx_size_1023, &nsd->rx_size_1023);
1030 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1031 I40E_GLPRT_PRC1522L(hw->port),
1032 pf->stat_offsets_loaded,
1033 &osd->rx_size_1522, &nsd->rx_size_1522);
1034 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1035 I40E_GLPRT_PRC9522L(hw->port),
1036 pf->stat_offsets_loaded,
1037 &osd->rx_size_big, &nsd->rx_size_big);
1038
1039 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1040 I40E_GLPRT_PTC64L(hw->port),
1041 pf->stat_offsets_loaded,
1042 &osd->tx_size_64, &nsd->tx_size_64);
1043 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1044 I40E_GLPRT_PTC127L(hw->port),
1045 pf->stat_offsets_loaded,
1046 &osd->tx_size_127, &nsd->tx_size_127);
1047 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1048 I40E_GLPRT_PTC255L(hw->port),
1049 pf->stat_offsets_loaded,
1050 &osd->tx_size_255, &nsd->tx_size_255);
1051 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1052 I40E_GLPRT_PTC511L(hw->port),
1053 pf->stat_offsets_loaded,
1054 &osd->tx_size_511, &nsd->tx_size_511);
1055 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1056 I40E_GLPRT_PTC1023L(hw->port),
1057 pf->stat_offsets_loaded,
1058 &osd->tx_size_1023, &nsd->tx_size_1023);
1059 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1060 I40E_GLPRT_PTC1522L(hw->port),
1061 pf->stat_offsets_loaded,
1062 &osd->tx_size_1522, &nsd->tx_size_1522);
1063 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1064 I40E_GLPRT_PTC9522L(hw->port),
1065 pf->stat_offsets_loaded,
1066 &osd->tx_size_big, &nsd->tx_size_big);
1067
1068 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1069 pf->stat_offsets_loaded,
1070 &osd->rx_undersize, &nsd->rx_undersize);
1071 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1072 pf->stat_offsets_loaded,
1073 &osd->rx_fragments, &nsd->rx_fragments);
1074 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1075 pf->stat_offsets_loaded,
1076 &osd->rx_oversize, &nsd->rx_oversize);
1077 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1078 pf->stat_offsets_loaded,
1079 &osd->rx_jabber, &nsd->rx_jabber);
1080
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00001081 /* FDIR stats */
Anjali Singhai Jain0bf4b1b2015-04-16 20:06:02 -04001082 i40e_stat_update32(hw,
1083 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00001084 pf->stat_offsets_loaded,
1085 &osd->fd_atr_match, &nsd->fd_atr_match);
Anjali Singhai Jain0bf4b1b2015-04-16 20:06:02 -04001086 i40e_stat_update32(hw,
1087 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00001088 pf->stat_offsets_loaded,
1089 &osd->fd_sb_match, &nsd->fd_sb_match);
Anjali Singhai Jain60ccd452015-04-16 20:06:01 -04001090 i40e_stat_update32(hw,
1091 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
1092 pf->stat_offsets_loaded,
1093 &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00001094
Shannon Nelson7812fdd2014-04-23 04:50:18 +00001095 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1096 nsd->tx_lpi_status =
1097 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1098 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1099 nsd->rx_lpi_status =
1100 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1101 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1102 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1103 pf->stat_offsets_loaded,
1104 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1105 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1106 pf->stat_offsets_loaded,
1107 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1108
Anjali Singhai Jaind0389e52015-04-22 19:34:05 -04001109 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1110 !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
1111 nsd->fd_sb_status = true;
1112 else
1113 nsd->fd_sb_status = false;
1114
1115 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1116 !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1117 nsd->fd_atr_status = true;
1118 else
1119 nsd->fd_atr_status = false;
1120
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001121 pf->stat_offsets_loaded = true;
1122}
1123
1124/**
Shannon Nelson7812fdd2014-04-23 04:50:18 +00001125 * i40e_update_stats - Update the various statistics counters.
1126 * @vsi: the VSI to be updated
1127 *
1128 * Update the various stats for this VSI and its related entities.
1129 **/
1130void i40e_update_stats(struct i40e_vsi *vsi)
1131{
1132 struct i40e_pf *pf = vsi->back;
1133
1134 if (vsi == pf->vsi[pf->lan_vsi])
1135 i40e_update_pf_stats(pf);
1136
1137 i40e_update_vsi_stats(vsi);
Vasu Dev38e00432014-08-01 13:27:03 -07001138#ifdef I40E_FCOE
1139 i40e_update_fcoe_stats(vsi);
1140#endif
Shannon Nelson7812fdd2014-04-23 04:50:18 +00001141}
1142
1143/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001144 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1145 * @vsi: the VSI to be searched
1146 * @macaddr: the MAC address
1147 * @vlan: the vlan
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001148 *
1149 * Returns ptr to the filter object or NULL
1150 **/
1151static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
Jacob Keller6622f5c2016-10-05 09:30:32 -07001152 const u8 *macaddr, s16 vlan)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001153{
1154 struct i40e_mac_filter *f;
1155
1156 if (!vsi || !macaddr)
1157 return NULL;
1158
1159 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1160 if ((ether_addr_equal(macaddr, f->macaddr)) &&
Jacob Keller1bc87e82016-10-05 09:30:31 -07001161 (vlan == f->vlan))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001162 return f;
1163 }
1164 return NULL;
1165}
1166
1167/**
1168 * i40e_find_mac - Find a mac addr in the macvlan filters list
1169 * @vsi: the VSI to be searched
1170 * @macaddr: the MAC address we are searching for
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001171 *
1172 * Returns the first filter with the provided MAC address or NULL if
1173 * MAC address was not found
1174 **/
Jacob Keller6622f5c2016-10-05 09:30:32 -07001175struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001176{
1177 struct i40e_mac_filter *f;
1178
1179 if (!vsi || !macaddr)
1180 return NULL;
1181
1182 list_for_each_entry(f, &vsi->mac_filter_list, list) {
Jacob Keller1bc87e82016-10-05 09:30:31 -07001183 if ((ether_addr_equal(macaddr, f->macaddr)))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001184 return f;
1185 }
1186 return NULL;
1187}
1188
1189/**
1190 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1191 * @vsi: the VSI to be searched
1192 *
1193 * Returns true if VSI is in vlan mode or false otherwise
1194 **/
1195bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1196{
1197 struct i40e_mac_filter *f;
1198
1199 /* Only -1 for all the filters denotes not in vlan mode
1200 * so we have to go through all the list in order to make sure
1201 */
1202 list_for_each_entry(f, &vsi->mac_filter_list, list) {
Greg Rosed9b68f82015-07-23 16:54:31 -04001203 if (f->vlan >= 0 || vsi->info.pvid)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001204 return true;
1205 }
1206
1207 return false;
1208}
1209
/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 * If the filter already exists it is returned (and, if it was marked
 * for removal, revived to ACTIVE).
 *
 * NOTE: This function is expected to be called with mac_filter_list_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	/* Do not allow broadcast filter to be added since broadcast filter
	 * is added as part of add VSI for any newly created VSI except
	 * FDIR VSI
	 */
	if (is_broadcast_ether_addr(macaddr))
		return NULL;

	/* reuse an existing entry for this exact MAC+VLAN if present */
	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		/* GFP_ATOMIC: called under the mac_filter_list spinlock */
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		/* If we're in overflow promisc mode, set the state directly
		 * to failed, so we don't bother to try sending the filter
		 * to the hardware.
		 */
		if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))
			f->state = I40E_FILTER_FAILED;
		else
			f->state = I40E_FILTER_NEW;
		INIT_LIST_HEAD(&f->list);
		list_add_tail(&f->list, &vsi->mac_filter_list);

		/* tell the sync task there is work to push to hardware */
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}
1272
1273/**
Jacob Keller290d2552016-10-05 09:30:36 -07001274 * __i40e_del_filter - Remove a specific filter from the VSI
1275 * @vsi: VSI to remove from
1276 * @f: the filter to remove from the list
1277 *
1278 * This function should be called instead of i40e_del_filter only if you know
1279 * the exact filter you will remove already, such as via i40e_find_filter or
1280 * i40e_find_mac.
Kiran Patil21659032015-09-30 14:09:03 -04001281 *
1282 * NOTE: This function is expected to be called with mac_filter_list_lock
1283 * being held.
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001284 * ANOTHER NOTE: This function MUST be called from within the context of
1285 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1286 * instead of list_for_each_entry().
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001287 **/
Jacob Keller290d2552016-10-05 09:30:36 -07001288static void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001289{
Jacob Keller1bc87e82016-10-05 09:30:31 -07001290 if (!f)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001291 return;
1292
Jacob Keller1bc87e82016-10-05 09:30:31 -07001293 if ((f->state == I40E_FILTER_FAILED) ||
1294 (f->state == I40E_FILTER_NEW)) {
1295 /* this one never got added by the FW. Just remove it,
1296 * no need to sync anything.
1297 */
1298 list_del(&f->list);
1299 kfree(f);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001300 } else {
Jacob Keller1bc87e82016-10-05 09:30:31 -07001301 f->state = I40E_FILTER_REMOVE;
1302 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1303 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001304 }
1305}
1306
1307/**
Jacob Keller290d2552016-10-05 09:30:36 -07001308 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
1309 * @vsi: the VSI to be searched
1310 * @macaddr: the MAC address
1311 * @vlan: the VLAN
1312 *
1313 * NOTE: This function is expected to be called with mac_filter_list_lock
1314 * being held.
1315 * ANOTHER NOTE: This function MUST be called from within the context of
1316 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1317 * instead of list_for_each_entry().
1318 **/
1319void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1320{
1321 struct i40e_mac_filter *f;
1322
1323 if (!vsi || !macaddr)
1324 return;
1325
1326 f = i40e_find_filter(vsi, macaddr, vlan);
1327 __i40e_del_filter(vsi, f);
1328}
1329
1330/**
Jacob Keller35ec2ff2016-10-05 09:30:33 -07001331 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
1332 * @vsi: the VSI to be searched
1333 * @macaddr: the mac address to be filtered
1334 *
Jacob Keller5feb3d72016-10-05 09:30:34 -07001335 * Goes through all the macvlan filters and adds a macvlan filter for each
1336 * unique vlan that already exists. If a PVID has been assigned, instead only
1337 * add the macaddr to that VLAN.
Jacob Keller35ec2ff2016-10-05 09:30:33 -07001338 *
Jacob Keller5feb3d72016-10-05 09:30:34 -07001339 * Returns last filter added on success, else NULL
Jacob Keller35ec2ff2016-10-05 09:30:33 -07001340 **/
1341struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
1342 const u8 *macaddr)
1343{
Jacob Keller5feb3d72016-10-05 09:30:34 -07001344 struct i40e_mac_filter *f, *add = NULL;
1345
1346 if (vsi->info.pvid)
1347 return i40e_add_filter(vsi, macaddr,
1348 le16_to_cpu(vsi->info.pvid));
Jacob Keller35ec2ff2016-10-05 09:30:33 -07001349
1350 list_for_each_entry(f, &vsi->mac_filter_list, list) {
Jacob Keller57b341d2016-10-05 09:30:35 -07001351 if (f->state == I40E_FILTER_REMOVE)
1352 continue;
Jacob Keller5feb3d72016-10-05 09:30:34 -07001353 add = i40e_add_filter(vsi, macaddr, f->vlan);
1354 if (!add)
1355 return NULL;
Jacob Keller35ec2ff2016-10-05 09:30:33 -07001356 }
1357
Jacob Keller5feb3d72016-10-05 09:30:34 -07001358 return add;
Jacob Keller35ec2ff2016-10-05 09:30:33 -07001359}
1360
1361/**
1362 * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
1363 * @vsi: the VSI to be searched
1364 * @macaddr: the mac address to be removed
1365 *
1366 * Removes a given MAC address from a VSI, regardless of VLAN
1367 *
1368 * Returns 0 for success, or error
1369 **/
1370int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr)
1371{
Jacob Keller290d2552016-10-05 09:30:36 -07001372 struct i40e_mac_filter *f, *ftmp;
1373 bool found = false;
Jacob Keller35ec2ff2016-10-05 09:30:33 -07001374
1375 WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
1376 "Missing mac_filter_list_lock\n");
Jacob Keller290d2552016-10-05 09:30:36 -07001377 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1378 if (ether_addr_equal(macaddr, f->macaddr)) {
1379 __i40e_del_filter(vsi, f);
1380 found = true;
1381 }
Jacob Keller35ec2ff2016-10-05 09:30:33 -07001382 }
Jacob Keller290d2552016-10-05 09:30:36 -07001383
1384 if (found)
Jacob Keller35ec2ff2016-10-05 09:30:33 -07001385 return 0;
Jacob Keller290d2552016-10-05 09:30:36 -07001386 else
1387 return -ENOENT;
Jacob Keller35ec2ff2016-10-05 09:30:33 -07001388}
1389
/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Validates the new address, rewrites the VSI's MAC filters under the
 * filter list lock, and (for the main VSI) asks the firmware to update
 * its LAA record.  The actual filter programming is deferred to the
 * service task.
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* nothing to do if the address is unchanged */
	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	/* refuse the change while the device is down or mid-reset */
	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* swap the filters for the old address with ones for the new one,
	 * under the filter list lock (hence the _bh spinlock variants)
	 */
	spin_lock_bh(&vsi->mac_filter_list_lock);
	i40e_del_mac_all_vlan(vsi, netdev->dev_addr);
	i40e_put_mac_in_vlan(vsi, addr->sa_data);
	spin_unlock_bh(&vsi->mac_filter_list_lock);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		/* persist the locally-administered address in firmware so
		 * it survives (and is used for) wake-on-LAN
		 */
		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		/* a firmware failure here is logged but not fatal */
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
1451
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 *
 * Computes per-TC queue offsets/counts, records them in vsi->tc_config,
 * and fills in the tc_mapping/queue_mapping fields of @ctxt for the
 * firmware Add/Update VSI command.  Also sets vsi->num_queue_pairs to the
 * total number of queue pairs actually mapped.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	/* an enabled_tc of 0 means "only TC0" (bit 0 set) */
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	qcount = vsi->alloc_queue_pairs;

	/* split the allocated queues evenly across the enabled TCs,
	 * capped at the per-TC hardware limit
	 */
	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				/* main VSI is also bounded by the RSS size */
				qcount = min_t(int, pf->alloc_rss_size,
					       num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				/* these VSI types support a single TC only */
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs
			 * (the qmap field encodes the queue count as an
			 * exponent of 2)
			 */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		/* single-TC main VSI: honor an explicit request first,
		 * otherwise fall back to the MSI-X vector budget
		 */
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		/* SR-IOV VSIs get an explicit per-queue (non-contiguous) map */
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
				cpu_to_le16(vsi->base_queue + i);
	} else {
		/* everyone else uses a contiguous range from base_queue */
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
1593
1594/**
Jacob Keller6622f5c2016-10-05 09:30:32 -07001595 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1596 * @netdev: the netdevice
1597 * @addr: address to add
1598 *
1599 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1600 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1601 */
1602static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1603{
1604 struct i40e_netdev_priv *np = netdev_priv(netdev);
1605 struct i40e_vsi *vsi = np->vsi;
1606 struct i40e_mac_filter *f;
1607
1608 if (i40e_is_vsi_in_vlan(vsi))
1609 f = i40e_put_mac_in_vlan(vsi, addr);
1610 else
1611 f = i40e_add_filter(vsi, addr, I40E_VLAN_ANY);
1612
1613 if (f)
1614 return 0;
1615 else
1616 return -ENOMEM;
1617}
1618
1619/**
1620 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1621 * @netdev: the netdevice
1622 * @addr: address to add
1623 *
1624 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1625 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1626 */
1627static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1628{
1629 struct i40e_netdev_priv *np = netdev_priv(netdev);
1630 struct i40e_vsi *vsi = np->vsi;
1631
1632 if (i40e_is_vsi_in_vlan(vsi))
1633 i40e_del_mac_all_vlan(vsi, addr);
1634 else
1635 i40e_del_filter(vsi, addr, I40E_VLAN_ANY);
1636
1637 return 0;
1638}
1639
1640/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001641 * i40e_set_rx_mode - NDO callback to set the netdev filters
1642 * @netdev: network interface device structure
1643 **/
Vasu Dev38e00432014-08-01 13:27:03 -07001644#ifdef I40E_FCOE
1645void i40e_set_rx_mode(struct net_device *netdev)
1646#else
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001647static void i40e_set_rx_mode(struct net_device *netdev)
Vasu Dev38e00432014-08-01 13:27:03 -07001648#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001649{
1650 struct i40e_netdev_priv *np = netdev_priv(netdev);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001651 struct i40e_vsi *vsi = np->vsi;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001652
Kiran Patil21659032015-09-30 14:09:03 -04001653 spin_lock_bh(&vsi->mac_filter_list_lock);
1654
Jacob Keller6622f5c2016-10-05 09:30:32 -07001655 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1656 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001657
Kiran Patil21659032015-09-30 14:09:03 -04001658 spin_unlock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001659
1660 /* check for other flag changes */
1661 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1662 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1663 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1664 }
Jesse Brandeburgc53934c2016-01-04 10:33:06 -08001665
1666 /* schedule our worker thread which will take care of
1667 * applying the new filter changes
1668 */
1669 i40e_service_event_schedule(vsi->back);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001670}
1671
1672/**
Kiran Patil21659032015-09-30 14:09:03 -04001673 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1674 * @vsi: pointer to vsi struct
1675 * @from: Pointer to list which contains MAC filter entries - changes to
1676 * those entries needs to be undone.
1677 *
1678 * MAC filter entries from list were slated to be removed from device.
1679 **/
1680static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1681 struct list_head *from)
1682{
1683 struct i40e_mac_filter *f, *ftmp;
1684
1685 list_for_each_entry_safe(f, ftmp, from, list) {
Kiran Patil21659032015-09-30 14:09:03 -04001686 /* Move the element back into MAC filter list*/
1687 list_move_tail(&f->list, &vsi->mac_filter_list);
1688 }
1689}
1690
1691/**
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001692 * i40e_update_filter_state - Update filter state based on return data
1693 * from firmware
1694 * @count: Number of filters added
1695 * @add_list: return data from fw
1696 * @head: pointer to first filter in current batch
1697 * @aq_err: status from fw
Kiran Patil21659032015-09-30 14:09:03 -04001698 *
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001699 * MAC filter entries from list were slated to be added to device. Returns
1700 * number of successful filters. Note that 0 does NOT mean success!
Kiran Patil21659032015-09-30 14:09:03 -04001701 **/
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001702static int
1703i40e_update_filter_state(int count,
1704 struct i40e_aqc_add_macvlan_element_data *add_list,
1705 struct i40e_mac_filter *add_head, int aq_err)
Kiran Patil21659032015-09-30 14:09:03 -04001706{
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001707 int retval = 0;
1708 int i;
Kiran Patil21659032015-09-30 14:09:03 -04001709
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001710
1711 if (!aq_err) {
1712 retval = count;
1713 /* Everything's good, mark all filters active. */
1714 for (i = 0; i < count ; i++) {
1715 add_head->state = I40E_FILTER_ACTIVE;
1716 add_head = list_next_entry(add_head, list);
1717 }
1718 } else if (aq_err == I40E_AQ_RC_ENOSPC) {
1719 /* Device ran out of filter space. Check the return value
1720 * for each filter to see which ones are active.
1721 */
1722 for (i = 0; i < count ; i++) {
1723 if (add_list[i].match_method ==
1724 I40E_AQC_MM_ERR_NO_RES) {
1725 add_head->state = I40E_FILTER_FAILED;
1726 } else {
1727 add_head->state = I40E_FILTER_ACTIVE;
1728 retval++;
1729 }
1730 add_head = list_next_entry(add_head, list);
1731 }
1732 } else {
1733 /* Some other horrible thing happened, fail all filters */
1734 retval = 0;
1735 for (i = 0; i < count ; i++) {
1736 add_head->state = I40E_FILTER_FAILED;
1737 add_head = list_next_entry(add_head, list);
1738 }
Kiran Patil21659032015-09-30 14:09:03 -04001739 }
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001740 return retval;
Kiran Patil21659032015-09-30 14:09:03 -04001741}
1742
/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Serializes against itself via the __I40E_CONFIG_BUSY state bit, moves
 * pending add/remove filter entries onto temporary lists under the MAC
 * filter list lock, then issues batched AdminQ add/remove macvlan commands
 * outside the lock.  Also manages overflow-promiscuous mode when the
 * hardware runs out of filter space, and applies promiscuous/allmulti
 * changes from the netdev flags.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp, *add_head = NULL;
	struct list_head tmp_add_list, tmp_del_list;
	struct i40e_hw *hw = &vsi->back->hw;
	bool promisc_changed = false;
	char vsi_name[16] = "PF";
	int filter_list_len = 0;
	u32 changed_flags = 0;
	i40e_status aq_ret = 0;
	int retval = 0;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	int aq_err = 0;
	u16 cmd_flags;
	int list_size;
	int fcnt;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	/* only one sync may be in flight per VSI at a time */
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	/* snapshot which netdev flags changed since the last sync */
	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* build a human-readable VSI name for log messages */
	if (vsi->type == I40E_VSI_SRIOV)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
	else if (vsi->type != I40E_VSI_MAIN)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		spin_lock_bh(&vsi->mac_filter_list_lock);
		/* Create a list of filters to delete. */
		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (f->state == I40E_FILTER_REMOVE) {
				/* Move the element into temporary del_list */
				list_move_tail(&f->list, &tmp_del_list);
				vsi->active_filters--;
			}
			if (f->state == I40E_FILTER_NEW) {
				/* Move the element into temporary add_list */
				list_move_tail(&f->list, &tmp_add_list);
			}
		}
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	/* Now process 'del_list' outside the lock */
	if (!list_empty(&tmp_del_list)) {
		/* size one AdminQ buffer worth of delete elements */
		filter_list_len = hw->aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		list_size = filter_list_len *
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kzalloc(list_size, GFP_ATOMIC);
		if (!del_list) {
			/* Undo VSI's MAC filter entry element updates */
			spin_lock_bh(&vsi->mac_filter_list_lock);
			i40e_undo_del_filter_entries(vsi, &tmp_del_list);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			retval = -ENOMEM;
			goto out;
		}

		list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
			cmd_flags = 0;

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				del_list[num_del].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			} else {
				del_list[num_del].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid,
								del_list,
								num_del, NULL);
				aq_err = hw->aq.asq_last_status;
				num_del = 0;
				memset(del_list, 0, list_size);

				/* Explicitly ignore and do not report when
				 * firmware returns ENOENT.
				 */
				if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
					retval = -EIO;
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
						 vsi_name,
						 i40e_stat_str(hw, aq_ret),
						 i40e_aq_str(hw, aq_err));
				}
			}
			/* Release memory for MAC filter entries which were
			 * synced up with HW.
			 */
			list_del(&f->list);
			kfree(f);
		}

		/* flush the remaining partial buffer, if any */
		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list,
							num_del, NULL);
			aq_err = hw->aq.asq_last_status;
			num_del = 0;

			/* Explicitly ignore and do not report when firmware
			 * returns ENOENT.
			 */
			if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
				retval = -EIO;
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error on %s, err %s aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw, aq_err));
			}
		}

		kfree(del_list);
		del_list = NULL;
	}

	if (!list_empty(&tmp_add_list)) {
		/* Do all the adds now. */
		filter_list_len = hw->aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		list_size = filter_list_len *
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kzalloc(list_size, GFP_ATOMIC);
		if (!add_list) {
			retval = -ENOMEM;
			goto out;
		}
		num_add = 0;
		list_for_each_entry(f, &tmp_add_list, list) {
			/* once in overflow promisc, don't bother adding more;
			 * mark the rest failed so they get cleaned up later
			 */
			if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
				     &vsi->state)) {
				f->state = I40E_FILTER_FAILED;
				continue;
			}
			/* add to add array */
			if (num_add == 0)
				add_head = f;
			cmd_flags = 0;
			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				add_list[num_add].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			} else {
				add_list[num_add].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}
			add_list[num_add].queue_number = 0;
			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
							     add_list, num_add,
							     NULL);
				aq_err = hw->aq.asq_last_status;
				fcnt = i40e_update_filter_state(num_add,
								add_list,
								add_head,
								aq_ret);
				vsi->active_filters += fcnt;

				/* not all filters made it in: force overflow
				 * promiscuous so no traffic is dropped, and
				 * remember a 3/4 threshold to come back out
				 */
				if (fcnt != num_add) {
					promisc_changed = true;
					set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
						&vsi->state);
					vsi->promisc_threshold =
						(vsi->active_filters * 3) / 4;
					dev_warn(&pf->pdev->dev,
						 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
						 i40e_aq_str(hw, aq_err),
						 vsi_name);
				}
				memset(add_list, 0, list_size);
				num_add = 0;
			}
		}
		/* flush the remaining partial buffer, if any */
		if (num_add) {
			aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
						     add_list, num_add, NULL);
			aq_err = hw->aq.asq_last_status;
			fcnt = i40e_update_filter_state(num_add, add_list,
							add_head, aq_ret);
			vsi->active_filters += fcnt;
			if (fcnt != num_add) {
				promisc_changed = true;
				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state);
				vsi->promisc_threshold =
					(vsi->active_filters * 3) / 4;
				dev_warn(&pf->pdev->dev,
					 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
					 i40e_aq_str(hw, aq_err), vsi_name);
			}
		}
		/* Now move all of the filters from the temp add list back to
		 * the VSI's list.
		 */
		spin_lock_bh(&vsi->mac_filter_list_lock);
		list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
			list_move_tail(&f->list, &vsi->mac_filter_list);
		}
		spin_unlock_bh(&vsi->mac_filter_list_lock);
		kfree(add_list);
		add_list = NULL;
	}

	/* Check to see if we can drop out of overflow promiscuous mode. */
	if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) &&
	    (vsi->active_filters < vsi->promisc_threshold)) {
		int failed_count = 0;
		/* See if we have any failed filters. We can't drop out of
		 * promiscuous until these have all been deleted.
		 */
		spin_lock_bh(&vsi->mac_filter_list_lock);
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (f->state == I40E_FILTER_FAILED)
				failed_count++;
		}
		spin_unlock_bh(&vsi->mac_filter_list_lock);
		if (!failed_count) {
			dev_info(&pf->pdev->dev,
				 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
				 vsi_name);
			clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
			promisc_changed = true;
			vsi->promisc_threshold = 0;
		}
	}

	/* if the VF is not trusted do not do promisc */
	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
		clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
		goto out;
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;

		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed on %s, err %s aq_err %s\n",
				 vsi_name,
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}
	/* apply unicast promisc if the flag changed, or if overflow
	 * promiscuous state just changed above
	 */
	if ((changed_flags & IFF_PROMISC) ||
	    (promisc_changed &&
	     test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) {
		bool cur_promisc;

		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state));
		if ((vsi->type == I40E_VSI_MAIN) &&
		    (pf->lan_veb != I40E_NO_VEB) &&
		    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
			/* set defport ON for Main VSI instead of true promisc
			 * this way we will get all unicast/multicast and VLAN
			 * promisc behavior but will not get VF or VMDq traffic
			 * replicated on the Main VSI.
			 */
			if (pf->cur_promisc != cur_promisc) {
				pf->cur_promisc = cur_promisc;
				if (cur_promisc)
					aq_ret =
					      i40e_aq_set_default_vsi(hw,
								      vsi->seid,
								      NULL);
				else
					aq_ret =
					    i40e_aq_clear_default_vsi(hw,
								      vsi->seid,
								      NULL);
				if (aq_ret) {
					retval = i40e_aq_rc_to_posix(aq_ret,
							hw->aq.asq_last_status);
					dev_info(&pf->pdev->dev,
						 "Set default VSI failed on %s, err %s, aq_err %s\n",
						 vsi_name,
						 i40e_stat_str(hw, aq_ret),
						 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
				}
			}
		} else {
			aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
							  hw,
							  vsi->seid,
							  cur_promisc, NULL,
							  true);
			if (aq_ret) {
				retval =
				i40e_aq_rc_to_posix(aq_ret,
						    hw->aq.asq_last_status);
				dev_info(&pf->pdev->dev,
					 "set unicast promisc failed on %s, err %s, aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
			}
			aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
							  hw,
							  vsi->seid,
							  cur_promisc, NULL);
			if (aq_ret) {
				retval =
				i40e_aq_rc_to_posix(aq_ret,
						    hw->aq.asq_last_status);
				dev_info(&pf->pdev->dev,
					 "set multicast promisc failed on %s, err %s, aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
			}
		}
		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
						   vsi->seid,
						   cur_promisc, NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     pf->hw.aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set brdcast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw,
					     hw->aq.asq_last_status));
		}
	}
out:
	/* if something went wrong then set the changed flag so we try again */
	if (retval)
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
	return retval;
}
2127
2128/**
2129 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2130 * @pf: board private structure
2131 **/
2132static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2133{
2134 int v;
2135
2136 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
2137 return;
2138 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
2139
Mitch Williams505682c2014-05-20 08:01:37 +00002140 for (v = 0; v < pf->num_alloc_vsi; v++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002141 if (pf->vsi[v] &&
Jesse Brandeburg17652c62015-11-05 17:01:02 -08002142 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2143 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2144
2145 if (ret) {
2146 /* come back and try again later */
2147 pf->flags |= I40E_FLAG_FILTER_SYNC;
2148 break;
2149 }
2150 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002151 }
2152}
2153
2154/**
2155 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2156 * @netdev: network interface device structure
2157 * @new_mtu: new value for maximum frame size
2158 *
2159 * Returns 0 on success, negative on failure
2160 **/
2161static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2162{
2163 struct i40e_netdev_priv *np = netdev_priv(netdev);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002164 struct i40e_vsi *vsi = np->vsi;
2165
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002166 netdev_info(netdev, "changing MTU from %d to %d\n",
2167 netdev->mtu, new_mtu);
2168 netdev->mtu = new_mtu;
2169 if (netif_running(netdev))
2170 i40e_vsi_reinit_locked(vsi);
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06002171 i40e_notify_client_of_l2_param_changes(vsi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002172 return 0;
2173}
2174
2175/**
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002176 * i40e_ioctl - Access the hwtstamp interface
2177 * @netdev: network interface device structure
2178 * @ifr: interface request data
2179 * @cmd: ioctl command
2180 **/
2181int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2182{
2183 struct i40e_netdev_priv *np = netdev_priv(netdev);
2184 struct i40e_pf *pf = np->vsi->back;
2185
2186 switch (cmd) {
2187 case SIOCGHWTSTAMP:
2188 return i40e_ptp_get_ts_config(pf, ifr);
2189 case SIOCSHWTSTAMP:
2190 return i40e_ptp_set_ts_config(pf, ifr);
2191 default:
2192 return -EOPNOTSUPP;
2193 }
2194}
2195
2196/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002197 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2198 * @vsi: the vsi being adjusted
2199 **/
2200void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2201{
2202 struct i40e_vsi_context ctxt;
2203 i40e_status ret;
2204
2205 if ((vsi->info.valid_sections &
2206 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2207 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2208 return; /* already enabled */
2209
2210 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2211 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2212 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2213
2214 ctxt.seid = vsi->seid;
Jesse Brandeburg1a2f6242015-03-31 00:45:01 -07002215 ctxt.info = vsi->info;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002216 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2217 if (ret) {
2218 dev_info(&vsi->back->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04002219 "update vlan stripping failed, err %s aq_err %s\n",
2220 i40e_stat_str(&vsi->back->hw, ret),
2221 i40e_aq_str(&vsi->back->hw,
2222 vsi->back->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002223 }
2224}
2225
2226/**
2227 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2228 * @vsi: the vsi being adjusted
2229 **/
2230void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2231{
2232 struct i40e_vsi_context ctxt;
2233 i40e_status ret;
2234
2235 if ((vsi->info.valid_sections &
2236 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2237 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2238 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2239 return; /* already disabled */
2240
2241 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2242 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2243 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2244
2245 ctxt.seid = vsi->seid;
Jesse Brandeburg1a2f6242015-03-31 00:45:01 -07002246 ctxt.info = vsi->info;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002247 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2248 if (ret) {
2249 dev_info(&vsi->back->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04002250 "update vlan stripping failed, err %s aq_err %s\n",
2251 i40e_stat_str(&vsi->back->hw, ret),
2252 i40e_aq_str(&vsi->back->hw,
2253 vsi->back->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002254 }
2255}
2256
2257/**
2258 * i40e_vlan_rx_register - Setup or shutdown vlan offload
2259 * @netdev: network interface to be adjusted
2260 * @features: netdev features to test if VLAN offload is enabled or not
2261 **/
2262static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2263{
2264 struct i40e_netdev_priv *np = netdev_priv(netdev);
2265 struct i40e_vsi *vsi = np->vsi;
2266
2267 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2268 i40e_vlan_stripping_enable(vsi);
2269 else
2270 i40e_vlan_stripping_disable(vsi);
2271}
2272
2273/**
2274 * i40e_vsi_add_vlan - Add vsi membership for given vlan
2275 * @vsi: the vsi being configured
2276 * @vid: vlan id to be added (0 = untagged only , -1 = any)
2277 **/
2278int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
2279{
Jacob Keller290d2552016-10-05 09:30:36 -07002280 struct i40e_mac_filter *f, *ftmp, *add_f, *del_f;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002281
Kiran Patil21659032015-09-30 14:09:03 -04002282 /* Locked once because all functions invoked below iterates list*/
2283 spin_lock_bh(&vsi->mac_filter_list_lock);
2284
Jacob Keller1bc87e82016-10-05 09:30:31 -07002285 if (vsi->netdev) {
2286 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002287 if (!add_f) {
2288 dev_info(&vsi->back->pdev->dev,
2289 "Could not add vlan filter %d for %pM\n",
2290 vid, vsi->netdev->dev_addr);
Kiran Patil21659032015-09-30 14:09:03 -04002291 spin_unlock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002292 return -ENOMEM;
2293 }
2294 }
2295
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07002296 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
Jacob Keller57b341d2016-10-05 09:30:35 -07002297 if (f->state == I40E_FILTER_REMOVE)
2298 continue;
Jacob Keller1bc87e82016-10-05 09:30:31 -07002299 add_f = i40e_add_filter(vsi, f->macaddr, vid);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002300 if (!add_f) {
2301 dev_info(&vsi->back->pdev->dev,
2302 "Could not add vlan filter %d for %pM\n",
2303 vid, f->macaddr);
Kiran Patil21659032015-09-30 14:09:03 -04002304 spin_unlock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002305 return -ENOMEM;
2306 }
2307 }
2308
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002309 /* Now if we add a vlan tag, make sure to check if it is the first
2310 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
2311 * with 0, so we now accept untagged and specified tagged traffic
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07002312 * (and not all tags along with untagged)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002313 */
Jacob Keller290d2552016-10-05 09:30:36 -07002314 if (vid > 0 && vsi->netdev) {
2315 del_f = i40e_find_filter(vsi, vsi->netdev->dev_addr,
2316 I40E_VLAN_ANY);
2317 if (del_f) {
2318 __i40e_del_filter(vsi, del_f);
Jacob Keller1bc87e82016-10-05 09:30:31 -07002319 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002320 if (!add_f) {
2321 dev_info(&vsi->back->pdev->dev,
2322 "Could not add filter 0 for %pM\n",
2323 vsi->netdev->dev_addr);
Kiran Patil21659032015-09-30 14:09:03 -04002324 spin_unlock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002325 return -ENOMEM;
2326 }
2327 }
Greg Rose8d82a7c2014-01-13 16:13:04 -08002328 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002329
Greg Rose8d82a7c2014-01-13 16:13:04 -08002330 /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
2331 if (vid > 0 && !vsi->info.pvid) {
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07002332 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
Jacob Keller57b341d2016-10-05 09:30:35 -07002333 if (f->state == I40E_FILTER_REMOVE)
2334 continue;
Jacob Keller290d2552016-10-05 09:30:36 -07002335 del_f = i40e_find_filter(vsi, f->macaddr,
2336 I40E_VLAN_ANY);
2337 if (!del_f)
Kiran Patil21659032015-09-30 14:09:03 -04002338 continue;
Jacob Keller290d2552016-10-05 09:30:36 -07002339 __i40e_del_filter(vsi, del_f);
Jacob Keller1bc87e82016-10-05 09:30:31 -07002340 add_f = i40e_add_filter(vsi, f->macaddr, 0);
Kiran Patil21659032015-09-30 14:09:03 -04002341 if (!add_f) {
2342 dev_info(&vsi->back->pdev->dev,
2343 "Could not add filter 0 for %pM\n",
2344 f->macaddr);
2345 spin_unlock_bh(&vsi->mac_filter_list_lock);
2346 return -ENOMEM;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002347 }
2348 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002349 }
2350
Kiran Patil21659032015-09-30 14:09:03 -04002351 spin_unlock_bh(&vsi->mac_filter_list_lock);
2352
Jesse Brandeburg0e4425e2015-11-05 17:01:01 -08002353 /* schedule our worker thread which will take care of
2354 * applying the new filter changes
2355 */
2356 i40e_service_event_schedule(vsi->back);
2357 return 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002358}
2359
2360/**
2361 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
2362 * @vsi: the vsi being configured
2363 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002364 *
2365 * Return: 0 on success or negative otherwise
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002366 **/
2367int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
2368{
2369 struct net_device *netdev = vsi->netdev;
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07002370 struct i40e_mac_filter *f, *ftmp, *add_f;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002371 int filter_count = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002372
Kiran Patil21659032015-09-30 14:09:03 -04002373 /* Locked once because all functions invoked below iterates list */
2374 spin_lock_bh(&vsi->mac_filter_list_lock);
2375
Jacob Keller1bc87e82016-10-05 09:30:31 -07002376 if (vsi->netdev)
2377 i40e_del_filter(vsi, netdev->dev_addr, vid);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002378
Jacob Keller290d2552016-10-05 09:30:36 -07002379 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
2380 if (f->vlan == vid)
2381 __i40e_del_filter(vsi, f);
2382 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002383
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002384 /* go through all the filters for this VSI and if there is only
2385 * vid == 0 it means there are no other filters, so vid 0 must
2386 * be replaced with -1. This signifies that we should from now
2387 * on accept any traffic (with any tag present, or untagged)
2388 */
2389 list_for_each_entry(f, &vsi->mac_filter_list, list) {
Jacob Keller1bc87e82016-10-05 09:30:31 -07002390 if (vsi->netdev) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002391 if (f->vlan &&
2392 ether_addr_equal(netdev->dev_addr, f->macaddr))
2393 filter_count++;
2394 }
2395
2396 if (f->vlan)
2397 filter_count++;
2398 }
2399
Jacob Keller1bc87e82016-10-05 09:30:31 -07002400 if (!filter_count && vsi->netdev) {
2401 i40e_del_filter(vsi, netdev->dev_addr, 0);
2402 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002403 if (!f) {
2404 dev_info(&vsi->back->pdev->dev,
2405 "Could not add filter %d for %pM\n",
2406 I40E_VLAN_ANY, netdev->dev_addr);
Kiran Patil21659032015-09-30 14:09:03 -04002407 spin_unlock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002408 return -ENOMEM;
2409 }
2410 }
2411
2412 if (!filter_count) {
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07002413 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
Jacob Keller290d2552016-10-05 09:30:36 -07002414 if (!f->vlan)
2415 __i40e_del_filter(vsi, f);
Jacob Keller1bc87e82016-10-05 09:30:31 -07002416 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002417 if (!add_f) {
2418 dev_info(&vsi->back->pdev->dev,
2419 "Could not add filter %d for %pM\n",
2420 I40E_VLAN_ANY, f->macaddr);
Kiran Patil21659032015-09-30 14:09:03 -04002421 spin_unlock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002422 return -ENOMEM;
2423 }
2424 }
2425 }
2426
Kiran Patil21659032015-09-30 14:09:03 -04002427 spin_unlock_bh(&vsi->mac_filter_list_lock);
2428
Jesse Brandeburg0e4425e2015-11-05 17:01:01 -08002429 /* schedule our worker thread which will take care of
2430 * applying the new filter changes
2431 */
2432 i40e_service_event_schedule(vsi->back);
2433 return 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002434}
2435
2436/**
2437 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2438 * @netdev: network interface to be adjusted
2439 * @vid: vlan id to be added
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002440 *
2441 * net_device_ops implementation for adding vlan ids
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002442 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002443#ifdef I40E_FCOE
2444int i40e_vlan_rx_add_vid(struct net_device *netdev,
2445 __always_unused __be16 proto, u16 vid)
2446#else
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002447static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2448 __always_unused __be16 proto, u16 vid)
Vasu Dev38e00432014-08-01 13:27:03 -07002449#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002450{
2451 struct i40e_netdev_priv *np = netdev_priv(netdev);
2452 struct i40e_vsi *vsi = np->vsi;
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002453 int ret = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002454
2455 if (vid > 4095)
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002456 return -EINVAL;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002457
Anjali Singhai Jain6982d422014-02-06 05:51:10 +00002458 /* If the network stack called us with vid = 0 then
2459 * it is asking to receive priority tagged packets with
2460 * vlan id 0. Our HW receives them by default when configured
2461 * to receive untagged packets so there is no need to add an
2462 * extra filter for vlan 0 tagged packets.
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002463 */
Anjali Singhai Jain6982d422014-02-06 05:51:10 +00002464 if (vid)
2465 ret = i40e_vsi_add_vlan(vsi, vid);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002466
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002467 if (!ret && (vid < VLAN_N_VID))
2468 set_bit(vid, vsi->active_vlans);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002469
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002470 return ret;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002471}
2472
2473/**
2474 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2475 * @netdev: network interface to be adjusted
2476 * @vid: vlan id to be removed
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002477 *
Akeem G Abodunrinfdfd9432014-02-11 08:24:15 +00002478 * net_device_ops implementation for removing vlan ids
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002479 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002480#ifdef I40E_FCOE
2481int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2482 __always_unused __be16 proto, u16 vid)
2483#else
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002484static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2485 __always_unused __be16 proto, u16 vid)
Vasu Dev38e00432014-08-01 13:27:03 -07002486#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002487{
2488 struct i40e_netdev_priv *np = netdev_priv(netdev);
2489 struct i40e_vsi *vsi = np->vsi;
2490
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002491 /* return code is ignored as there is nothing a user
2492 * can do about failure to remove and a log message was
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002493 * already printed from the other function
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002494 */
2495 i40e_vsi_kill_vlan(vsi, vid);
2496
2497 clear_bit(vid, vsi->active_vlans);
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002498
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002499 return 0;
2500}
2501
2502/**
Tushar Daveb1b15df2016-07-01 10:11:20 -07002503 * i40e_macaddr_init - explicitly write the mac address filters
2504 *
2505 * @vsi: pointer to the vsi
2506 * @macaddr: the MAC address
2507 *
2508 * This is needed when the macaddr has been obtained by other
2509 * means than the default, e.g., from Open Firmware or IDPROM.
2510 * Returns 0 on success, negative on failure
2511 **/
2512static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
2513{
2514 int ret;
2515 struct i40e_aqc_add_macvlan_element_data element;
2516
2517 ret = i40e_aq_mac_address_write(&vsi->back->hw,
2518 I40E_AQC_WRITE_TYPE_LAA_WOL,
2519 macaddr, NULL);
2520 if (ret) {
2521 dev_info(&vsi->back->pdev->dev,
2522 "Addr change for VSI failed: %d\n", ret);
2523 return -EADDRNOTAVAIL;
2524 }
2525
2526 memset(&element, 0, sizeof(element));
2527 ether_addr_copy(element.mac_addr, macaddr);
2528 element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
2529 ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
2530 if (ret) {
2531 dev_info(&vsi->back->pdev->dev,
2532 "add filter failed err %s aq_err %s\n",
2533 i40e_stat_str(&vsi->back->hw, ret),
2534 i40e_aq_str(&vsi->back->hw,
2535 vsi->back->hw.aq.asq_last_status));
2536 }
2537 return ret;
2538}
2539
2540/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002541 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2542 * @vsi: the vsi being brought back up
2543 **/
2544static void i40e_restore_vlan(struct i40e_vsi *vsi)
2545{
2546 u16 vid;
2547
2548 if (!vsi->netdev)
2549 return;
2550
2551 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2552
2553 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2554 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2555 vid);
2556}
2557
2558/**
2559 * i40e_vsi_add_pvid - Add pvid for the VSI
2560 * @vsi: the vsi being adjusted
2561 * @vid: the vlan id to set as a PVID
2562 **/
Jesse Brandeburgdcae29b2013-09-13 08:23:20 +00002563int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002564{
2565 struct i40e_vsi_context ctxt;
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04002566 i40e_status ret;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002567
2568 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2569 vsi->info.pvid = cpu_to_le16(vid);
Greg Rose6c12fcb2013-11-28 06:39:34 +00002570 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2571 I40E_AQ_VSI_PVLAN_INSERT_PVID |
Greg Roseb774c7d2013-11-28 06:39:44 +00002572 I40E_AQ_VSI_PVLAN_EMOD_STR;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002573
2574 ctxt.seid = vsi->seid;
Jesse Brandeburg1a2f6242015-03-31 00:45:01 -07002575 ctxt.info = vsi->info;
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04002576 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2577 if (ret) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002578 dev_info(&vsi->back->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04002579 "add pvid failed, err %s aq_err %s\n",
2580 i40e_stat_str(&vsi->back->hw, ret),
2581 i40e_aq_str(&vsi->back->hw,
2582 vsi->back->hw.aq.asq_last_status));
Jesse Brandeburgdcae29b2013-09-13 08:23:20 +00002583 return -ENOENT;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002584 }
2585
Jesse Brandeburgdcae29b2013-09-13 08:23:20 +00002586 return 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002587}
2588
2589/**
2590 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2591 * @vsi: the vsi being adjusted
2592 *
2593 * Just use the vlan_rx_register() service to put it back to normal
2594 **/
2595void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2596{
Greg Rose6c12fcb2013-11-28 06:39:34 +00002597 i40e_vlan_stripping_disable(vsi);
2598
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002599 vsi->info.pvid = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002600}
2601
2602/**
2603 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2604 * @vsi: ptr to the VSI
2605 *
2606 * If this function returns with an error, then it's possible one or
2607 * more of the rings is populated (while the rest are not). It is the
2608 * callers duty to clean those orphaned rings.
2609 *
2610 * Return 0 on success, negative on failure
2611 **/
2612static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2613{
2614 int i, err = 0;
2615
2616 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
Alexander Duyck9f65e152013-09-28 06:00:58 +00002617 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002618
2619 return err;
2620}
2621
2622/**
2623 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2624 * @vsi: ptr to the VSI
2625 *
2626 * Free VSI's transmit software resources
2627 **/
2628static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2629{
2630 int i;
2631
Greg Rose8e9dca52013-12-18 13:45:53 +00002632 if (!vsi->tx_rings)
2633 return;
2634
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002635 for (i = 0; i < vsi->num_queue_pairs; i++)
Greg Rose8e9dca52013-12-18 13:45:53 +00002636 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
Alexander Duyck9f65e152013-09-28 06:00:58 +00002637 i40e_free_tx_resources(vsi->tx_rings[i]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002638}
2639
2640/**
2641 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2642 * @vsi: ptr to the VSI
2643 *
2644 * If this function returns with an error, then it's possible one or
2645 * more of the rings is populated (while the rest are not). It is the
2646 * callers duty to clean those orphaned rings.
2647 *
2648 * Return 0 on success, negative on failure
2649 **/
2650static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2651{
2652 int i, err = 0;
2653
2654 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
Alexander Duyck9f65e152013-09-28 06:00:58 +00002655 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
Vasu Dev38e00432014-08-01 13:27:03 -07002656#ifdef I40E_FCOE
2657 i40e_fcoe_setup_ddp_resources(vsi);
2658#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002659 return err;
2660}
2661
2662/**
2663 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2664 * @vsi: ptr to the VSI
2665 *
2666 * Free all receive software resources
2667 **/
2668static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2669{
2670 int i;
2671
Greg Rose8e9dca52013-12-18 13:45:53 +00002672 if (!vsi->rx_rings)
2673 return;
2674
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002675 for (i = 0; i < vsi->num_queue_pairs; i++)
Greg Rose8e9dca52013-12-18 13:45:53 +00002676 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
Alexander Duyck9f65e152013-09-28 06:00:58 +00002677 i40e_free_rx_resources(vsi->rx_rings[i]);
Vasu Dev38e00432014-08-01 13:27:03 -07002678#ifdef I40E_FCOE
2679 i40e_fcoe_free_ddp_resources(vsi);
2680#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002681}
2682
2683/**
Neerav Parikh3ffa0372014-11-12 00:19:02 +00002684 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2685 * @ring: The Tx ring to configure
2686 *
2687 * This enables/disables XPS for a given Tx descriptor ring
2688 * based on the TCs enabled for the VSI that ring belongs to.
2689 **/
2690static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2691{
2692 struct i40e_vsi *vsi = ring->vsi;
2693 cpumask_var_t mask;
2694
Jesse Brandeburg9a660ee2015-02-26 16:13:22 +00002695 if (!ring->q_vector || !ring->netdev)
2696 return;
2697
2698 /* Single TC mode enable XPS */
2699 if (vsi->tc_config.numtc <= 1) {
2700 if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
Neerav Parikh3ffa0372014-11-12 00:19:02 +00002701 netif_set_xps_queue(ring->netdev,
2702 &ring->q_vector->affinity_mask,
2703 ring->queue_index);
Jesse Brandeburg9a660ee2015-02-26 16:13:22 +00002704 } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2705 /* Disable XPS to allow selection based on TC */
2706 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2707 netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
2708 free_cpumask_var(mask);
Neerav Parikh3ffa0372014-11-12 00:19:02 +00002709 }
Jesse Brandeburg0e4425e2015-11-05 17:01:01 -08002710
2711 /* schedule our worker thread which will take care of
2712 * applying the new filter changes
2713 */
2714 i40e_service_event_schedule(vsi->back);
Neerav Parikh3ffa0372014-11-12 00:19:02 +00002715}
2716
2717/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002718 * i40e_configure_tx_ring - Configure a transmit ring context and rest
2719 * @ring: The Tx ring to configure
2720 *
2721 * Configure the Tx descriptor ring in the HMC context.
2722 **/
2723static int i40e_configure_tx_ring(struct i40e_ring *ring)
2724{
2725 struct i40e_vsi *vsi = ring->vsi;
2726 u16 pf_q = vsi->base_queue + ring->queue_index;
2727 struct i40e_hw *hw = &vsi->back->hw;
2728 struct i40e_hmc_obj_txq tx_ctx;
2729 i40e_status err = 0;
2730 u32 qtx_ctl = 0;
2731
2732 /* some ATR related tx ring init */
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -08002733 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002734 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2735 ring->atr_count = 0;
2736 } else {
2737 ring->atr_sample_rate = 0;
2738 }
2739
Neerav Parikh3ffa0372014-11-12 00:19:02 +00002740 /* configure XPS */
2741 i40e_config_xps_tx_ring(ring);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002742
2743 /* clear the context structure first */
2744 memset(&tx_ctx, 0, sizeof(tx_ctx));
2745
2746 tx_ctx.new_context = 1;
2747 tx_ctx.base = (ring->dma / 128);
2748 tx_ctx.qlen = ring->count;
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -08002749 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2750 I40E_FLAG_FD_ATR_ENABLED));
Vasu Dev38e00432014-08-01 13:27:03 -07002751#ifdef I40E_FCOE
2752 tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2753#endif
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002754 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
Jesse Brandeburg1943d8b2014-02-14 02:14:40 +00002755 /* FDIR VSI tx ring can still use RS bit and writebacks */
2756 if (vsi->type != I40E_VSI_FDIR)
2757 tx_ctx.head_wb_ena = 1;
2758 tx_ctx.head_wb_addr = ring->dma +
2759 (ring->count * sizeof(struct i40e_tx_desc));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002760
2761 /* As part of VSI creation/update, FW allocates certain
2762 * Tx arbitration queue sets for each TC enabled for
2763 * the VSI. The FW returns the handles to these queue
2764 * sets as part of the response buffer to Add VSI,
2765 * Update VSI, etc. AQ commands. It is expected that
2766 * these queue set handles be associated with the Tx
2767 * queues by the driver as part of the TX queue context
2768 * initialization. This has to be done regardless of
2769 * DCB as by default everything is mapped to TC0.
2770 */
2771 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2772 tx_ctx.rdylist_act = 0;
2773
2774 /* clear the context in the HMC */
2775 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2776 if (err) {
2777 dev_info(&vsi->back->pdev->dev,
2778 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2779 ring->queue_index, pf_q, err);
2780 return -ENOMEM;
2781 }
2782
2783 /* set the context in the HMC */
2784 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2785 if (err) {
2786 dev_info(&vsi->back->pdev->dev,
2787 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2788 ring->queue_index, pf_q, err);
2789 return -ENOMEM;
2790 }
2791
2792 /* Now associate this queue with this PCI function */
Mitch Williams7a28d882014-10-17 03:14:52 +00002793 if (vsi->type == I40E_VSI_VMDQ2) {
Shannon Nelson9d8bf542014-01-14 00:49:50 -08002794 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
Mitch Williams7a28d882014-10-17 03:14:52 +00002795 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2796 I40E_QTX_CTL_VFVM_INDX_MASK;
2797 } else {
Shannon Nelson9d8bf542014-01-14 00:49:50 -08002798 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
Mitch Williams7a28d882014-10-17 03:14:52 +00002799 }
2800
Shannon Nelson13fd9772013-09-28 07:14:19 +00002801 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2802 I40E_QTX_CTL_PF_INDX_MASK);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002803 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2804 i40e_flush(hw);
2805
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002806 /* cache tail off for easier writes later */
2807 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2808
2809 return 0;
2810}
2811
2812/**
2813 * i40e_configure_rx_ring - Configure a receive ring context
2814 * @ring: The Rx ring to configure
2815 *
2816 * Configure the Rx descriptor ring in the HMC context.
2817 **/
2818static int i40e_configure_rx_ring(struct i40e_ring *ring)
2819{
2820 struct i40e_vsi *vsi = ring->vsi;
2821 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2822 u16 pf_q = vsi->base_queue + ring->queue_index;
2823 struct i40e_hw *hw = &vsi->back->hw;
2824 struct i40e_hmc_obj_rxq rx_ctx;
2825 i40e_status err = 0;
2826
2827 ring->state = 0;
2828
2829 /* clear the context structure first */
2830 memset(&rx_ctx, 0, sizeof(rx_ctx));
2831
2832 ring->rx_buf_len = vsi->rx_buf_len;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002833
2834 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002835
2836 rx_ctx.base = (ring->dma / 128);
2837 rx_ctx.qlen = ring->count;
2838
Jesse Brandeburgbec60fc2016-04-18 11:33:47 -07002839 /* use 32 byte descriptors */
2840 rx_ctx.dsize = 1;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002841
Jesse Brandeburgbec60fc2016-04-18 11:33:47 -07002842 /* descriptor type is always zero
2843 * rx_ctx.dtype = 0;
2844 */
Jesse Brandeburgb32bfa172016-04-18 11:33:42 -07002845 rx_ctx.hsplit_0 = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002846
Jesse Brandeburgb32bfa172016-04-18 11:33:42 -07002847 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
Jesse Brandeburg7134f9c2013-11-26 08:56:05 +00002848 if (hw->revision_id == 0)
2849 rx_ctx.lrxqthresh = 0;
2850 else
2851 rx_ctx.lrxqthresh = 2;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002852 rx_ctx.crcstrip = 1;
2853 rx_ctx.l2tsel = 1;
Jesse Brandeburgc4bbac32015-09-28 11:21:48 -07002854 /* this controls whether VLAN is stripped from inner headers */
2855 rx_ctx.showiv = 0;
Vasu Dev38e00432014-08-01 13:27:03 -07002856#ifdef I40E_FCOE
2857 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2858#endif
Catherine Sullivanacb36762014-03-06 09:02:30 +00002859 /* set the prefena field to 1 because the manual says to */
2860 rx_ctx.prefena = 1;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002861
2862 /* clear the context in the HMC */
2863 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2864 if (err) {
2865 dev_info(&vsi->back->pdev->dev,
2866 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2867 ring->queue_index, pf_q, err);
2868 return -ENOMEM;
2869 }
2870
2871 /* set the context in the HMC */
2872 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2873 if (err) {
2874 dev_info(&vsi->back->pdev->dev,
2875 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2876 ring->queue_index, pf_q, err);
2877 return -ENOMEM;
2878 }
2879
2880 /* cache tail for quicker writes, and clear the reg before use */
2881 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2882 writel(0, ring->tail);
2883
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002884 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002885
2886 return 0;
2887}
2888
2889/**
2890 * i40e_vsi_configure_tx - Configure the VSI for Tx
2891 * @vsi: VSI structure describing this set of rings and resources
2892 *
2893 * Configure the Tx VSI for operation.
2894 **/
2895static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2896{
2897 int err = 0;
2898 u16 i;
2899
Alexander Duyck9f65e152013-09-28 06:00:58 +00002900 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2901 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002902
2903 return err;
2904}
2905
/**
 * i40e_vsi_configure_rx - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Configure the Rx VSI for operation.
 *
 * Derives the maximum frame size from the netdev MTU (falling back to the
 * default 2K buffer when there is no netdev or the MTU is standard), picks
 * the Rx buffer length, then programs every Rx ring in the VSI.
 *
 * Returns 0 on success, or the error from the first failing ring.
 **/
static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	/* A jumbo MTU needs room for the L2 header, FCS and one VLAN tag
	 * on top of the MTU itself.
	 */
	if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
		vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
			       + ETH_FCS_LEN + VLAN_HLEN;
	else
		vsi->max_frame = I40E_RXBUFFER_2048;

	vsi->rx_buf_len = I40E_RXBUFFER_2048;

#ifdef I40E_FCOE
	/* setup rx buffer for FCoE: FCoE frames need the larger 3K buffer */
	if ((vsi->type == I40E_VSI_FCOE) &&
	    (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
		vsi->rx_buf_len = I40E_RXBUFFER_3072;
		vsi->max_frame = I40E_RXBUFFER_3072;
	}

#endif /* I40E_FCOE */
	/* round up for the chip's needs */
	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
				BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));

	/* set up individual rings */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_configure_rx_ring(vsi->rx_rings[i]);

	return err;
}
2944
2945/**
2946 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2947 * @vsi: ptr to the VSI
2948 **/
2949static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2950{
Akeem G Abodunrine7046ee2014-04-09 05:58:58 +00002951 struct i40e_ring *tx_ring, *rx_ring;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002952 u16 qoffset, qcount;
2953 int i, n;
2954
Parikh, Neeravcd238a32015-02-21 06:43:37 +00002955 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2956 /* Reset the TC information */
2957 for (i = 0; i < vsi->num_queue_pairs; i++) {
2958 rx_ring = vsi->rx_rings[i];
2959 tx_ring = vsi->tx_rings[i];
2960 rx_ring->dcb_tc = 0;
2961 tx_ring->dcb_tc = 0;
2962 }
2963 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002964
2965 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04002966 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002967 continue;
2968
2969 qoffset = vsi->tc_config.tc_info[n].qoffset;
2970 qcount = vsi->tc_config.tc_info[n].qcount;
2971 for (i = qoffset; i < (qoffset + qcount); i++) {
Akeem G Abodunrine7046ee2014-04-09 05:58:58 +00002972 rx_ring = vsi->rx_rings[i];
2973 tx_ring = vsi->tx_rings[i];
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002974 rx_ring->dcb_tc = n;
2975 tx_ring->dcb_tc = n;
2976 }
2977 }
2978}
2979
2980/**
2981 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2982 * @vsi: ptr to the VSI
2983 **/
2984static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2985{
Tushar Daveb1b15df2016-07-01 10:11:20 -07002986 struct i40e_pf *pf = vsi->back;
2987 int err;
2988
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002989 if (vsi->netdev)
2990 i40e_set_rx_mode(vsi->netdev);
Tushar Daveb1b15df2016-07-01 10:11:20 -07002991
2992 if (!!(pf->flags & I40E_FLAG_PF_MAC)) {
2993 err = i40e_macaddr_init(vsi, pf->hw.mac.addr);
2994 if (err) {
2995 dev_warn(&pf->pdev->dev,
2996 "could not set up macaddr; err %d\n", err);
2997 }
2998 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002999}
3000
3001/**
Joseph Gasparakis17a73f62014-02-12 01:45:30 +00003002 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3003 * @vsi: Pointer to the targeted VSI
3004 *
3005 * This function replays the hlist on the hw where all the SB Flow Director
3006 * filters were saved.
3007 **/
3008static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3009{
3010 struct i40e_fdir_filter *filter;
3011 struct i40e_pf *pf = vsi->back;
3012 struct hlist_node *node;
3013
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00003014 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3015 return;
3016
Joseph Gasparakis17a73f62014-02-12 01:45:30 +00003017 hlist_for_each_entry_safe(filter, node,
3018 &pf->fdir_filter_list, fdir_node) {
3019 i40e_add_del_fdir(vsi, filter, true);
3020 }
3021}
3022
/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 *
 * Applies Rx mode, VLANs and DCB ring tagging, then programs the Tx and
 * Rx rings.  Returns 0 on success or the first ring-configuration error.
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int ret;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);

	/* Tx first; only touch Rx when Tx setup succeeded */
	ret = i40e_vsi_configure_tx(vsi);
	if (ret)
		return ret;

	return i40e_vsi_configure_rx(vsi);
}
3040
/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 *
 * Programs the per-vector ITR and rate-limit registers, then builds the
 * hardware linked list of queue pairs serviced by each MSI-X vector:
 * PFINT_LNKLSTN -> QINT_RQCTL -> QINT_TQCTL -> ... -> END_OF_LIST.
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[i];

		/* Seed the vector's Rx/Tx ITR from ring i's settings and
		 * push the values to hardware (note the -1 offset above).
		 */
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);
		/* per-vector interrupt rate limit */
		wr32(hw, I40E_PFINT_RATEN(vector - 1),
		     INTRL_USEC_TO_REG(vsi->int_rate_limit));

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			u32 val;

			/* Rx cause: next list entry is the Tx queue of the
			 * same queue pair (index qp, type TX).
			 */
			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_TX
				      << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			/* Tx cause: next list entry is the Rx queue of the
			 * following queue pair (qp + 1, type RX).
			 */
			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_RX
				      << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST
					<< I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}
3107
/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: board private structure
 *
 * Programs PFINT_ICR0_ENA with the set of "other" (non-queue) interrupt
 * causes the driver handles: ECC, malicious driver, reset, PCI exception,
 * GPIO, HMC, VFLR and AdminQ events, plus the iWARP and PTP causes when
 * those features are enabled.
 **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	      I40E_PFINT_ICR0_ENA_GRST_MASK |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK |
	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_VFLR_MASK |
	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	/* feature-dependent causes */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;

	if (pf->flags & I40E_FLAG_PTP)
		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	     I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	/* OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
3145
/**
 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
 * @vsi: the VSI being configured
 *
 * In MSI/legacy mode only queue pair 0 and PF vector 0 are used: program
 * ITR0 for Rx and Tx, enable the misc (non-queue) causes, and chain queue
 * 0's Rx and Tx interrupt causes onto vector 0.
 **/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* set the ITR configuration */
	q_vector->itr_countdown = ITR_COUNTDOWN_START;
	q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
	q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
	q_vector->tx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);

	i40e_enable_misc_int_causes(pf);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the queue int */
	/* NOTE(review): the RQCTL value below is built with the TQCTL
	 * NEXTQ_TYPE shift; presumably the NEXTQ_TYPE field sits at the
	 * same bit position in both QINT_RQCTL and QINT_TQCTL -- confirm
	 * against the register definitions.
	 */
	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

	wr32(hw, I40E_QINT_RQCTL(0), val);

	/* Tx cause terminates the list */
	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

	wr32(hw, I40E_QINT_TQCTL(0), val);
	i40e_flush(hw);
}
3185
/**
 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
 * @pf: board private structure
 *
 * Writes PFINT_DYN_CTL0 without the INTENA bit, which stops the "other
 * cause" interrupt from firing, then flushes the write to hardware.
 **/
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;

	/* NOTE(review): uses the DYN_CTLN shift for a DYN_CTL0 write;
	 * presumably the ITR_INDX field sits at the same bit position in
	 * both registers -- confirm against the register definitions.
	 */
	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	i40e_flush(hw);
}
3198
3199/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003200 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3201 * @pf: board private structure
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08003202 * @clearpba: true when all pending interrupt events should be cleared
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003203 **/
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08003204void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003205{
3206 struct i40e_hw *hw = &pf->hw;
3207 u32 val;
3208
3209 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08003210 (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003211 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3212
3213 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3214 i40e_flush(hw);
3215}
3216
3217/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003218 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3219 * @irq: interrupt number
3220 * @data: pointer to a q_vector
3221 **/
3222static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3223{
3224 struct i40e_q_vector *q_vector = data;
3225
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003226 if (!q_vector->tx.ring && !q_vector->rx.ring)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003227 return IRQ_HANDLED;
3228
Alexander Duyck5d3465a2015-09-29 15:19:50 -07003229 napi_schedule_irqoff(&q_vector->napi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003230
3231 return IRQ_HANDLED;
3232}
3233
3234/**
Alan Brady96db7762016-09-14 16:24:38 -07003235 * i40e_irq_affinity_notify - Callback for affinity changes
3236 * @notify: context as to what irq was changed
3237 * @mask: the new affinity mask
3238 *
3239 * This is a callback function used by the irq_set_affinity_notifier function
3240 * so that we may register to receive changes to the irq affinity masks.
3241 **/
3242static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3243 const cpumask_t *mask)
3244{
3245 struct i40e_q_vector *q_vector =
3246 container_of(notify, struct i40e_q_vector, affinity_notify);
3247
3248 q_vector->affinity_mask = *mask;
3249}
3250
/**
 * i40e_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 *
 * Intentionally empty: the notifier is embedded in the q_vector (see
 * i40e_irq_affinity_notify), so there is nothing separate to free here.
 **/
static void i40e_irq_affinity_release(struct kref *ref) {}
3260
3261/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003262 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3263 * @vsi: the VSI being configured
3264 * @basename: name for the vector
3265 *
3266 * Allocates MSI-X vectors and requests interrupts from the kernel.
3267 **/
3268static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3269{
3270 int q_vectors = vsi->num_q_vectors;
3271 struct i40e_pf *pf = vsi->back;
3272 int base = vsi->base_vector;
3273 int rx_int_idx = 0;
3274 int tx_int_idx = 0;
3275 int vector, err;
Alan Brady96db7762016-09-14 16:24:38 -07003276 int irq_num;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003277
3278 for (vector = 0; vector < q_vectors; vector++) {
Alexander Duyck493fb302013-09-28 07:01:44 +00003279 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003280
Alan Brady96db7762016-09-14 16:24:38 -07003281 irq_num = pf->msix_entries[base + vector].vector;
3282
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003283 if (q_vector->tx.ring && q_vector->rx.ring) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003284 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3285 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3286 tx_int_idx++;
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003287 } else if (q_vector->rx.ring) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003288 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3289 "%s-%s-%d", basename, "rx", rx_int_idx++);
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003290 } else if (q_vector->tx.ring) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003291 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3292 "%s-%s-%d", basename, "tx", tx_int_idx++);
3293 } else {
3294 /* skip this unused q_vector */
3295 continue;
3296 }
Alan Brady96db7762016-09-14 16:24:38 -07003297 err = request_irq(irq_num,
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003298 vsi->irq_handler,
3299 0,
3300 q_vector->name,
3301 q_vector);
3302 if (err) {
3303 dev_info(&pf->pdev->dev,
Shannon Nelsonfb43201f2015-08-26 15:14:17 -04003304 "MSIX request_irq failed, error: %d\n", err);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003305 goto free_queue_irqs;
3306 }
Alan Brady96db7762016-09-14 16:24:38 -07003307
3308 /* register for affinity change notifications */
3309 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3310 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3311 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003312 /* assign the mask for this irq */
Alan Brady96db7762016-09-14 16:24:38 -07003313 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003314 }
3315
Shannon Nelson63741842014-04-23 04:50:16 +00003316 vsi->irqs_ready = true;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003317 return 0;
3318
3319free_queue_irqs:
3320 while (vector) {
3321 vector--;
Alan Brady96db7762016-09-14 16:24:38 -07003322 irq_num = pf->msix_entries[base + vector].vector;
3323 irq_set_affinity_notifier(irq_num, NULL);
3324 irq_set_affinity_hint(irq_num, NULL);
3325 free_irq(irq_num, &vsi->q_vectors[vector]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003326 }
3327 return err;
3328}
3329
/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 *
 * Zeroes the per-queue interrupt cause registers, then disables the
 * vector-level controls and waits for any in-flight interrupt handlers
 * to complete before returning.
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	/* Stop the queues from raising interrupt causes */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
	}

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		/* Clear each vector's dynamic control register (the "- 1"
		 * reflects the vector-to-register offset noted in
		 * i40e_vsi_configure_msix), then wait out any handlers
		 * already running on those vectors.
		 */
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}
3362
3363/**
3364 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3365 * @vsi: the VSI being configured
3366 **/
3367static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3368{
3369 struct i40e_pf *pf = vsi->back;
3370 int i;
3371
3372 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
Jesse Brandeburg78455482015-07-23 16:54:41 -04003373 for (i = 0; i < vsi->num_q_vectors; i++)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003374 i40e_irq_dynamic_enable(vsi, i);
3375 } else {
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08003376 i40e_irq_dynamic_enable_icr0(pf, true);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003377 }
3378
Jesse Brandeburg1022cb62013-09-28 07:13:08 +00003379 i40e_flush(&pf->hw);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003380 return 0;
3381}
3382
3383/**
3384 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3385 * @pf: board private structure
3386 **/
3387static void i40e_stop_misc_vector(struct i40e_pf *pf)
3388{
3389 /* Disable ICR 0 */
3390 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3391 i40e_flush(&pf->hw);
3392}
3393
/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts.  This is also used in
 * MSIX mode to handle the non-queue interrupts.
 *
 * Returns IRQ_HANDLED when a cause was serviced, IRQ_NONE when the line
 * fired with no cause pending (possible on a shared legacy IRQ).
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	/* Snapshot the pending causes and the enabled set; bits are pruned
	 * from ena_mask below for causes handed off to the service task so
	 * they stay masked until serviced.
	 */
	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	/* iWARP critical error: clear and log, leave the cause masked */
	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
	}

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
		struct i40e_q_vector *q_vector = vsi->q_vectors[0];

		/* We do not have a way to disarm Queue causes while leaving
		 * interrupt enabled for all other causes, ideally
		 * interrupt should be disabled while we are in NAPI but
		 * this is not a performance path and napi_schedule()
		 * can deal with rescheduling.
		 */
		if (!test_bit(__I40E_DOWN, &pf->state))
			napi_schedule_irqoff(&q_vector->napi);
	}

	/* AdminQ work: mask the cause and let the service task drain it */
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
		i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
	}

	/* malicious driver detection */
	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	}

	/* VF reset (VFLR) */
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	}

	/* Global reset request: decode which reset type fired and count it */
	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
			set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		if (val == I40E_RESET_CORER) {
			pf->corer_count++;
		} else if (val == I40E_RESET_GLOBR) {
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
		}
	}

	/* HMC error: log the hardware's info/data registers */
	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
			 rd32(hw, I40E_PFHMC_ERRORINFO),
			 rd32(hw, I40E_PFHMC_ERRORDATA));
	}

	/* PTP Tx timestamp ready */
	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
			i40e_ptp_tx_hwtstamp(pf);
		}
	}

	/* If a critical error is pending we have no choice but to reset the
	 * device.
	 * Report and mask out any remaining unexpected interrupts.
	 */
	icr0_remaining = icr0 & ena_mask;
	if (icr0_remaining) {
		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
			 icr0_remaining);
		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
			dev_info(&pf->pdev->dev, "device will be reset\n");
			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
			i40e_service_event_schedule(pf);
		}
		ena_mask &= ~icr0_remaining;
	}
	ret = IRQ_HANDLED;

enable_intr:
	/* re-enable interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
	if (!test_bit(__I40E_DOWN, &pf->state)) {
		i40e_service_event_schedule(pf);
		i40e_irq_dynamic_enable_icr0(pf, false);
	}

	return ret;
}
3524
/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 *
 * Flow Director programming places two descriptors per operation on the
 * ring (a filter descriptor followed by a data/eop descriptor); each loop
 * iteration retires one such pair.
 **/
static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	struct i40e_vsi *vsi = tx_ring->vsi;
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;

	/* i is biased by -count so the end-of-ring wrap test is simply
	 * "i became zero" after the increments below.
	 */
	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* retire the filter descriptor */
		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;
		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}
		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);
		/* sideband filters carry a driver-allocated raw buffer */
		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buf->raw_buf);

		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move us past the eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	/* undo the -count bias before publishing next_to_clean */
	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);

	return budget > 0;
}
3609
3610/**
3611 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3612 * @irq: interrupt number
3613 * @data: pointer to a q_vector
3614 **/
3615static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3616{
3617 struct i40e_q_vector *q_vector = data;
3618 struct i40e_vsi *vsi;
3619
3620 if (!q_vector->tx.ring)
3621 return IRQ_HANDLED;
3622
3623 vsi = q_vector->tx.ring->vsi;
3624 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3625
3626 return IRQ_HANDLED;
3627}
3628
3629/**
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003630 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003631 * @vsi: the VSI being configured
3632 * @v_idx: vector index
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003633 * @qp_idx: queue pair index
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003634 **/
Anjali Singhai Jain26cdc442015-07-10 19:36:00 -04003635static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003636{
Alexander Duyck493fb302013-09-28 07:01:44 +00003637 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
Alexander Duyck9f65e152013-09-28 06:00:58 +00003638 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3639 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003640
3641 tx_ring->q_vector = q_vector;
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003642 tx_ring->next = q_vector->tx.ring;
3643 q_vector->tx.ring = tx_ring;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003644 q_vector->tx.count++;
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003645
3646 rx_ring->q_vector = q_vector;
3647 rx_ring->next = q_vector->rx.ring;
3648 q_vector->rx.ring = rx_ring;
3649 q_vector->rx.count++;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003650}
3651
3652/**
3653 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3654 * @vsi: the VSI being configured
3655 *
3656 * This function maps descriptor rings to the queue-specific vectors
3657 * we were allotted through the MSI-X enabling code. Ideally, we'd have
3658 * one vector per queue pair, but on a constrained vector budget, we
3659 * group the queue pairs as "efficiently" as possible.
3660 **/
3661static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3662{
3663 int qp_remaining = vsi->num_queue_pairs;
3664 int q_vectors = vsi->num_q_vectors;
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003665 int num_ringpairs;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003666 int v_start = 0;
3667 int qp_idx = 0;
3668
3669 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3670 * group them so there are multiple queues per vector.
Anjali Singhai Jain70114ec2014-06-03 23:50:14 +00003671 * It is also important to go through all the vectors available to be
3672 * sure that if we don't use all the vectors, that the remaining vectors
3673 * are cleared. This is especially important when decreasing the
3674 * number of queues in use.
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003675 */
Anjali Singhai Jain70114ec2014-06-03 23:50:14 +00003676 for (; v_start < q_vectors; v_start++) {
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003677 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3678
3679 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3680
3681 q_vector->num_ringpairs = num_ringpairs;
3682
3683 q_vector->rx.count = 0;
3684 q_vector->tx.count = 0;
3685 q_vector->rx.ring = NULL;
3686 q_vector->tx.ring = NULL;
3687
3688 while (num_ringpairs--) {
Anjali Singhai Jain26cdc442015-07-10 19:36:00 -04003689 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003690 qp_idx++;
3691 qp_remaining--;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003692 }
3693 }
3694}
3695
3696/**
3697 * i40e_vsi_request_irq - Request IRQ from the OS
3698 * @vsi: the VSI being configured
3699 * @basename: name for the vector
3700 **/
3701static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3702{
3703 struct i40e_pf *pf = vsi->back;
3704 int err;
3705
3706 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3707 err = i40e_vsi_request_irq_msix(vsi, basename);
3708 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3709 err = request_irq(pf->pdev->irq, i40e_intr, 0,
Carolyn Wybornyb294ac72014-12-11 07:06:39 +00003710 pf->int_name, pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003711 else
3712 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
Carolyn Wybornyb294ac72014-12-11 07:06:39 +00003713 pf->int_name, pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003714
3715 if (err)
3716 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3717
3718 return err;
3719}
3720
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
#ifdef I40E_FCOE
void i40e_netpoll(struct net_device *netdev)
#else
static void i40e_netpoll(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		/* MSI-X: service each queue vector's rings directly */
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		/* MSI/legacy: run the single shared interrupt handler */
		i40e_intr(pf->pdev->irq, netdev);
	}
}
#endif
3752
3753/**
Neerav Parikh23527302014-06-03 23:50:15 +00003754 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3755 * @pf: the PF being configured
3756 * @pf_q: the PF queue
3757 * @enable: enable or disable state of the queue
3758 *
3759 * This routine will wait for the given Tx queue of the PF to reach the
3760 * enabled or disabled state.
3761 * Returns -ETIMEDOUT in case of failing to reach the requested state after
3762 * multiple retries; else will return 0 in case of success.
3763 **/
3764static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3765{
3766 int i;
3767 u32 tx_reg;
3768
3769 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3770 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3771 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3772 break;
3773
Neerav Parikhf98a2002014-09-13 07:40:44 +00003774 usleep_range(10, 20);
Neerav Parikh23527302014-06-03 23:50:15 +00003775 }
3776 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3777 return -ETIMEDOUT;
3778
3779 return 0;
3780}
3781
/**
 * i40e_vsi_control_tx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 *
 * Returns 0 on success, or -ETIMEDOUT if a queue fails to reach the
 * requested state within the retry limit.
 **/
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q, ret = 0;
	u32 tx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {

		/* warn the TX unit of coming changes */
		i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
		if (!enable)
			usleep_range(10, 20);

		/* wait for any in-flight request to settle: proceed only
		 * once the QENA_REQ and QENA_STAT bits agree
		 */
		for (j = 0; j < 50; j++) {
			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
			if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}
		/* Skip if the queue is already in the requested state */
		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable) {
			/* reset the HEAD register before (re)enabling */
			wr32(hw, I40E_QTX_HEAD(pf_q), 0);
			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
		} else {
			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
		}

		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
		/* No waiting for the Tx queue to disable */
		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
			continue;

		/* wait for the change to finish */
		ret = i40e_pf_txq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Tx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	/* revision 0 hardware gets an extra settle delay — presumably an
	 * early-silicon requirement; confirm against the datasheet
	 */
	if (hw->revision_id == 0)
		mdelay(50);
	return ret;
}
3840
3841/**
3842 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3843 * @pf: the PF being configured
3844 * @pf_q: the PF queue
3845 * @enable: enable or disable state of the queue
3846 *
3847 * This routine will wait for the given Rx queue of the PF to reach the
3848 * enabled or disabled state.
3849 * Returns -ETIMEDOUT in case of failing to reach the requested state after
3850 * multiple retries; else will return 0 in case of success.
3851 **/
3852static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3853{
3854 int i;
3855 u32 rx_reg;
3856
3857 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3858 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3859 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3860 break;
3861
Neerav Parikhf98a2002014-09-13 07:40:44 +00003862 usleep_range(10, 20);
Neerav Parikh23527302014-06-03 23:50:15 +00003863 }
3864 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3865 return -ETIMEDOUT;
Jesse Brandeburg7134f9c2013-11-26 08:56:05 +00003866
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003867 return 0;
3868}
3869
/**
 * i40e_vsi_control_rx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 *
 * Returns 0 on success, or -ETIMEDOUT if a queue fails to reach the
 * requested state within the retry limit.
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q, ret = 0;
	u32 rx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* wait for any in-flight request to settle: proceed only
		 * once the QENA_REQ and QENA_STAT bits agree
		 */
		for (j = 0; j < 50; j++) {
			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}

		/* Skip if the queue is already in the requested state */
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable)
			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		else
			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
		/* No waiting for the Rx queue to disable */
		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
			continue;

		/* wait for the change to finish */
		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	return ret;
}
3918
/**
 * i40e_vsi_control_rings - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @request: true to start (enable) the rings, false to stop (disable) them
 *
 * Returns 0 on success, or the first failing ring-control error code.
 **/
int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
{
	int ret = 0;

	/* do rx first for enable and last for disable */
	if (request) {
		ret = i40e_vsi_control_rx(vsi, request);
		if (ret)
			return ret;
		ret = i40e_vsi_control_tx(vsi, request);
	} else {
		/* Ignore return value, we need to shutdown whatever we can */
		i40e_vsi_control_tx(vsi, request);
		i40e_vsi_control_rx(vsi, request);
	}

	return ret;
}
3942
/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 *
 * Releases the VSI's IRQs back to the OS and unhooks the corresponding
 * queue interrupt linked lists in hardware.
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		/* nothing to free if the IRQs were never requested */
		if (!vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			int irq_num;
			u16 vector;

			vector = i + base;
			irq_num = pf->msix_entries[vector].vector;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity notifier in the IRQ descriptor */
			irq_set_affinity_notifier(irq_num, NULL);
			/* clear the affinity_mask in the IRQ descriptor */
			irq_set_affinity_hint(irq_num, NULL);
			/* make sure no in-flight handler still runs before
			 * the IRQ is freed
			 */
			synchronize_irq(irq_num);
			free_irq(irq_num, vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx. To clear the
			 * link list, stick the EOL value into the
			 * next_q field of the registers.
			 */
			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			val |= I40E_QUEUE_END_OF_LIST
				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);

			/* walk the old list, detaching each queue pair */
			while (qp != I40E_QUEUE_END_OF_LIST) {
				u32 next;

				val = rd32(hw, I40E_QINT_RQCTL(qp));

				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
					 I40E_QINT_RQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_RQCTL(qp), val);

				val = rd32(hw, I40E_QINT_TQCTL(qp));

				/* remember the next queue before clearing */
				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;

				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
					 I40E_QINT_TQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_TQCTL(qp), val);
				qp = next;
			}
		}
	} else {
		/* MSI/legacy: single IRQ and the LNKLST0 list to unhook */
		free_irq(pf->pdev->irq, pf);

		val = rd32(hw, I40E_PFINT_LNKLST0);
		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
		val |= I40E_QUEUE_END_OF_LIST
			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
		wr32(hw, I40E_PFINT_LNKLST0, val);

		val = rd32(hw, I40E_QINT_RQCTL(qp));
		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			 I40E_QINT_RQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
			 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_RQCTL(qp), val);

		val = rd32(hw, I40E_QINT_TQCTL(qp));

		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			 I40E_QINT_TQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
			 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}
4062
/**
 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *ring;

	if (!q_vector)
		return;

	/* disassociate q_vector from rings */
	i40e_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;

	i40e_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI w/ an associated netdev is set up w/ NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	vsi->q_vectors[v_idx] = NULL;

	/* freed via RCU — presumably readers may still hold a reference
	 * under rcu_read_lock; confirm against the q_vector users
	 */
	kfree_rcu(q_vector, rcu);
}
4095
4096/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004097 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4098 * @vsi: the VSI being un-configured
4099 *
4100 * This frees the memory allocated to the q_vectors and
4101 * deletes references to the NAPI struct.
4102 **/
4103static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4104{
4105 int v_idx;
4106
Alexander Duyck493fb302013-09-28 07:01:44 +00004107 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4108 i40e_free_q_vector(vsi, v_idx);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004109}
4110
/**
 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
 * @pf: board private structure
 *
 * Disables MSI-X or MSI at the PCI level and drops the associated
 * bookkeeping, then clears both capability flags.
 **/
static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
{
	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		pci_disable_msix(pf->pdev);
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		/* the vector tracking pile only exists in MSI-X mode */
		kfree(pf->irq_pile);
		pf->irq_pile = NULL;
	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
		pci_disable_msi(pf->pdev);
	}
	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
}
4129
/**
 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @pf: board private structure
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
	int i;

	i40e_stop_misc_vector(pf);
	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
		/* vector 0 is the misc/admin-queue interrupt; quiesce any
		 * in-flight handler before freeing it
		 */
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
	}

	/* return the iWARP vectors to the pile before the general release */
	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
		      I40E_IWARP_IRQ_PILE_ID);

	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	i40e_reset_interrupt_capability(pf);
}
4156
4157/**
4158 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4159 * @vsi: the VSI being configured
4160 **/
4161static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4162{
4163 int q_idx;
4164
4165 if (!vsi->netdev)
4166 return;
4167
4168 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
Alexander Duyck493fb302013-09-28 07:01:44 +00004169 napi_enable(&vsi->q_vectors[q_idx]->napi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004170}
4171
4172/**
4173 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4174 * @vsi: the VSI being configured
4175 **/
4176static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4177{
4178 int q_idx;
4179
4180 if (!vsi->netdev)
4181 return;
4182
4183 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
Alexander Duyck493fb302013-09-28 07:01:44 +00004184 napi_disable(&vsi->q_vectors[q_idx]->napi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004185}
4186
/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 *
 * Brings the VSI down (if not already), releases its IRQs and ring
 * resources, and notifies any attached client of the close.
 **/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
	bool reset = false;

	/* only call i40e_down() once: test_and_set guards re-entry */
	if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
		i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);
	vsi->current_netdev_flags = 0;
	/* tell the client whether this close is part of a reset recovery */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		reset = true;
	i40e_notify_client_of_netdev_close(vsi, reset);
}
4205
/**
 * i40e_quiesce_vsi - Pause a given VSI
 * @vsi: the VSI being paused
 *
 * Marks the VSI as needing restart and brings it down, preferring the
 * netdev ndo_stop path when the interface is running.
 **/
static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
{
	if (test_bit(__I40E_DOWN, &vsi->state))
		return;

	/* No need to disable FCoE VSI when Tx suspended */
	if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
	    vsi->type == I40E_VSI_FCOE) {
		dev_dbg(&vsi->back->pdev->dev,
			"VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
		return;
	}

	set_bit(__I40E_NEEDS_RESTART, &vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
	else
		i40e_vsi_close(vsi);
}
4229
4230/**
4231 * i40e_unquiesce_vsi - Resume a given VSI
4232 * @vsi: the VSI being resumed
4233 **/
4234static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4235{
4236 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
4237 return;
4238
4239 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4240 if (vsi->netdev && netif_running(vsi->netdev))
4241 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4242 else
Shannon Nelson8276f752014-03-14 07:32:27 +00004243 i40e_vsi_open(vsi); /* this clears the DOWN bit */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004244}
4245
4246/**
4247 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4248 * @pf: the PF
4249 **/
4250static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4251{
4252 int v;
4253
Mitch Williams505682c2014-05-20 08:01:37 +00004254 for (v = 0; v < pf->num_alloc_vsi; v++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004255 if (pf->vsi[v])
4256 i40e_quiesce_vsi(pf->vsi[v]);
4257 }
4258}
4259
4260/**
4261 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4262 * @pf: the PF
4263 **/
4264static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4265{
4266 int v;
4267
Mitch Williams505682c2014-05-20 08:01:37 +00004268 for (v = 0; v < pf->num_alloc_vsi; v++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004269 if (pf->vsi[v])
4270 i40e_unquiesce_vsi(pf->vsi[v]);
4271 }
4272}
4273
Neerav Parikh69129dc2014-11-12 00:18:46 +00004274#ifdef CONFIG_I40E_DCB
4275/**
Neerav Parikh3fe06f42016-02-17 16:12:15 -08004276 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
Neerav Parikh69129dc2014-11-12 00:18:46 +00004277 * @vsi: the VSI being configured
4278 *
Neerav Parikh3fe06f42016-02-17 16:12:15 -08004279 * This function waits for the given VSI's queues to be disabled.
Neerav Parikh69129dc2014-11-12 00:18:46 +00004280 **/
Neerav Parikh3fe06f42016-02-17 16:12:15 -08004281static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
Neerav Parikh69129dc2014-11-12 00:18:46 +00004282{
4283 struct i40e_pf *pf = vsi->back;
4284 int i, pf_q, ret;
4285
4286 pf_q = vsi->base_queue;
4287 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4288 /* Check and wait for the disable status of the queue */
4289 ret = i40e_pf_txq_wait(pf, pf_q, false);
4290 if (ret) {
4291 dev_info(&pf->pdev->dev,
Shannon Nelsonfb43201f2015-08-26 15:14:17 -04004292 "VSI seid %d Tx ring %d disable timeout\n",
4293 vsi->seid, pf_q);
Neerav Parikh69129dc2014-11-12 00:18:46 +00004294 return ret;
4295 }
4296 }
4297
Neerav Parikh3fe06f42016-02-17 16:12:15 -08004298 pf_q = vsi->base_queue;
4299 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4300 /* Check and wait for the disable status of the queue */
4301 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4302 if (ret) {
4303 dev_info(&pf->pdev->dev,
4304 "VSI seid %d Rx ring %d disable timeout\n",
4305 vsi->seid, pf_q);
4306 return ret;
4307 }
4308 }
4309
Neerav Parikh69129dc2014-11-12 00:18:46 +00004310 return 0;
4311}
4312
4313/**
Neerav Parikh3fe06f42016-02-17 16:12:15 -08004314 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
Neerav Parikh69129dc2014-11-12 00:18:46 +00004315 * @pf: the PF
4316 *
Neerav Parikh3fe06f42016-02-17 16:12:15 -08004317 * This function waits for the queues to be in disabled state for all the
Neerav Parikh69129dc2014-11-12 00:18:46 +00004318 * VSIs that are managed by this PF.
4319 **/
Neerav Parikh3fe06f42016-02-17 16:12:15 -08004320static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
Neerav Parikh69129dc2014-11-12 00:18:46 +00004321{
4322 int v, ret = 0;
4323
4324 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
Neerav Parikhd341b7a2014-11-12 00:18:51 +00004325 /* No need to wait for FCoE VSI queues */
4326 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
Neerav Parikh3fe06f42016-02-17 16:12:15 -08004327 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
Neerav Parikh69129dc2014-11-12 00:18:46 +00004328 if (ret)
4329 break;
4330 }
4331 }
4332
4333 return ret;
4334}
4335
4336#endif
Kiran Patilb03a8c12015-09-24 18:13:15 -04004337
/**
 * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
 * @q_idx: TX queue number
 * @vsi: Pointer to VSI struct
 *
 * This function checks specified queue for given VSI. Detects hung condition.
 * Sets hung bit since it is two step process. Before next run of service task
 * if napi_poll runs, it reset 'hung' bit for respective q_vector. If not,
 * hung condition remain unchanged and during subsequent run, this function
 * issues SW interrupt to recover from hung condition.
 **/
static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct i40e_pf *pf;
	u32 head, val, tx_pending_hw;
	int i;

	pf = vsi->back;

	/* now that we have an index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (q_idx == vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (!tx_ring)
		return;

	/* Read interrupt register */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		val = rd32(&pf->hw,
			   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
					       tx_ring->vsi->base_vector - 1));
	else
		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

	head = i40e_get_head(tx_ring);

	/* pending count as reported by the HW head write-back */
	tx_pending_hw = i40e_get_tx_pending(tx_ring, false);

	/* HW is done executing descriptors, updated HEAD write back,
	 * but SW hasn't processed those descriptors. If interrupt is
	 * not generated from this point ON, it could result into
	 * dev_watchdog detecting timeout on those netdev_queue,
	 * hence proactively trigger SW interrupt.
	 */
	if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
		/* NAPI Poll didn't run and clear since it was set */
		if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
				       &tx_ring->q_vector->hung_detected)) {
			/* second pass with bit still set: recover now */
			netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
				    vsi->seid, q_idx, tx_pending_hw,
				    tx_ring->next_to_clean, head,
				    tx_ring->next_to_use,
				    readl(tx_ring->tail));
			netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
				    vsi->seid, q_idx, val);
			i40e_force_wb(vsi, tx_ring->q_vector);
		} else {
			/* First Chance - detected possible hung */
			set_bit(I40E_Q_VECTOR_HUNG_DETECT,
				&tx_ring->q_vector->hung_detected);
		}
	}

	/* This is the case where we have interrupts missing,
	 * so the tx_pending in HW will most likely be 0, but we
	 * will have tx_pending in SW since the WB happened but the
	 * interrupt got lost.
	 */
	if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
	    (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
		if (napi_reschedule(&tx_ring->q_vector->napi))
			tx_ring->tx_stats.tx_lost_interrupt++;
	}
}
4419
4420/**
4421 * i40e_detect_recover_hung - Function to detect and recover hung_queues
4422 * @pf: pointer to PF struct
4423 *
4424 * LAN VSI has netdev and netdev has TX queues. This function is to check
4425 * each of those TX queues if they are hung, trigger recovery by issuing
4426 * SW interrupt.
4427 **/
4428static void i40e_detect_recover_hung(struct i40e_pf *pf)
4429{
4430 struct net_device *netdev;
4431 struct i40e_vsi *vsi;
4432 int i;
4433
4434 /* Only for LAN VSI */
4435 vsi = pf->vsi[pf->lan_vsi];
4436
4437 if (!vsi)
4438 return;
4439
4440 /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
4441 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
4442 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4443 return;
4444
4445 /* Make sure type is MAIN VSI */
4446 if (vsi->type != I40E_VSI_MAIN)
4447 return;
4448
4449 netdev = vsi->netdev;
4450 if (!netdev)
4451 return;
4452
4453 /* Bail out if netif_carrier is not OK */
4454 if (!netif_carrier_ok(netdev))
4455 return;
4456
4457 /* Go thru' TX queues for netdev */
4458 for (i = 0; i < netdev->num_tx_queues; i++) {
4459 struct netdev_queue *q;
4460
4461 q = netdev_get_tx_queue(netdev, i);
4462 if (q)
4463 i40e_detect_recover_hung_queue(i, vsi);
4464 }
4465}
4466
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004467/**
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004468 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
Jeff Kirsherb40c82e62015-02-27 09:18:34 +00004469 * @pf: pointer to PF
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004470 *
4471 * Get TC map for ISCSI PF type that will include iSCSI TC
4472 * and LAN TC.
4473 **/
4474static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4475{
4476 struct i40e_dcb_app_priority_table app;
4477 struct i40e_hw *hw = &pf->hw;
4478 u8 enabled_tc = 1; /* TC0 is always enabled */
4479 u8 tc, i;
4480 /* Get the iSCSI APP TLV */
4481 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4482
4483 for (i = 0; i < dcbcfg->numapps; i++) {
4484 app = dcbcfg->app[i];
4485 if (app.selector == I40E_APP_SEL_TCPIP &&
4486 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4487 tc = dcbcfg->etscfg.prioritytable[app.priority];
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08004488 enabled_tc |= BIT(tc);
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004489 break;
4490 }
4491 }
4492
4493 return enabled_tc;
4494}
4495
4496/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004497 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4498 * @dcbcfg: the corresponding DCBx configuration structure
4499 *
4500 * Return the number of TCs from given DCBx configuration
4501 **/
4502static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4503{
Dave Ertmanfbfe12c2016-08-12 09:56:32 -07004504 int i, tc_unused = 0;
Jesse Brandeburg078b5872013-09-25 23:41:14 +00004505 u8 num_tc = 0;
Dave Ertmanfbfe12c2016-08-12 09:56:32 -07004506 u8 ret = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004507
4508 /* Scan the ETS Config Priority Table to find
4509 * traffic class enabled for a given priority
Dave Ertmanfbfe12c2016-08-12 09:56:32 -07004510 * and create a bitmask of enabled TCs
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004511 */
Dave Ertmanfbfe12c2016-08-12 09:56:32 -07004512 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
4513 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
4514
4515 /* Now scan the bitmask to check for
4516 * contiguous TCs starting with TC0
4517 */
4518 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4519 if (num_tc & BIT(i)) {
4520 if (!tc_unused) {
4521 ret++;
4522 } else {
4523 pr_err("Non-contiguous TC - Disabling DCB\n");
4524 return 1;
4525 }
4526 } else {
4527 tc_unused = 1;
4528 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004529 }
4530
Dave Ertmanfbfe12c2016-08-12 09:56:32 -07004531 /* There is always at least TC0 */
4532 if (!ret)
4533 ret = 1;
4534
4535 return ret;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004536}
4537
4538/**
4539 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4540 * @dcbcfg: the corresponding DCBx configuration structure
4541 *
4542 * Query the current DCB configuration and return the number of
4543 * traffic classes enabled from the given DCBX config
4544 **/
4545static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4546{
4547 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4548 u8 enabled_tc = 1;
4549 u8 i;
4550
4551 for (i = 0; i < num_tc; i++)
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04004552 enabled_tc |= BIT(i);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004553
4554 return enabled_tc;
4555}
4556
4557/**
4558 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4559 * @pf: PF being queried
4560 *
4561 * Return number of traffic classes enabled for the given PF
4562 **/
4563static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4564{
4565 struct i40e_hw *hw = &pf->hw;
Dave Ertman52a08ca2016-07-27 12:02:34 -07004566 u8 i, enabled_tc = 1;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004567 u8 num_tc = 0;
4568 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4569
4570 /* If DCB is not enabled then always in single TC */
4571 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4572 return 1;
4573
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004574 /* SFP mode will be enabled for all TCs on port */
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004575 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4576 return i40e_dcb_get_num_tc(dcbcfg);
4577
4578 /* MFP mode return count of enabled TCs for this PF */
4579 if (pf->hw.func_caps.iscsi)
4580 enabled_tc = i40e_get_iscsi_tc_map(pf);
4581 else
Neerav Parikhfc51de92015-02-24 06:58:53 +00004582 return 1; /* Only TC0 */
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004583
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004584 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08004585 if (enabled_tc & BIT(i))
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004586 num_tc++;
4587 }
4588 return num_tc;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004589}
4590
4591/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004592 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
4593 * @pf: PF being queried
4594 *
4595 * Return a bitmap for enabled traffic classes for this PF.
4596 **/
4597static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4598{
4599 /* If DCB is not enabled for this PF then just return default TC */
4600 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
David Ertmanea6acb72016-09-20 07:10:50 -07004601 return I40E_DEFAULT_TRAFFIC_CLASS;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004602
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004603 /* SFP mode we want PF to be enabled for all TCs */
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004604 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4605 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4606
Neerav Parikhfc51de92015-02-24 06:58:53 +00004607 /* MFP enabled and iSCSI PF type */
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004608 if (pf->hw.func_caps.iscsi)
4609 return i40e_get_iscsi_tc_map(pf);
4610 else
David Ertmanea6acb72016-09-20 07:10:50 -07004611 return I40E_DEFAULT_TRAFFIC_CLASS;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004612}
4613
/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Asks firmware (via two admin queue commands) for the VSI-level and
 * per-TC bandwidth configuration, then caches the results in @vsi
 * (bw_limit, bw_max_quanta and the per-TC credit/quanta arrays).
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* The two responses should agree on which TCs are valid; a
	 * mismatch is logged but not treated as fatal.
	 */
	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	/* Cache the firmware-reported values; AQ fields are little-endian */
	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	/* Assemble the 32-bit per-TC max quanta field from two LE16 words */
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
			le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}

	return 0;
}
4673
4674/**
4675 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4676 * @vsi: the VSI being configured
4677 * @enabled_tc: TC bitmap
4678 * @bw_credits: BW shared credits per TC
4679 *
4680 * Returns 0 on success, negative value on failure
4681 **/
Jesse Brandeburgdcae29b2013-09-13 08:23:20 +00004682static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004683 u8 *bw_share)
4684{
4685 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04004686 i40e_status ret;
Jesse Brandeburgdcae29b2013-09-13 08:23:20 +00004687 int i;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004688
4689 bw_data.tc_valid_bits = enabled_tc;
4690 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4691 bw_data.tc_bw_credits[i] = bw_share[i];
4692
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04004693 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4694 NULL);
4695 if (ret) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004696 dev_info(&vsi->back->pdev->dev,
Jesse Brandeburg69bfb112014-02-11 08:24:13 +00004697 "AQ command Config VSI BW allocation per TC failed = %d\n",
4698 vsi->back->hw.aq.asq_last_status);
Jesse Brandeburgdcae29b2013-09-13 08:23:20 +00004699 return -EINVAL;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004700 }
4701
4702 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4703 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4704
Jesse Brandeburgdcae29b2013-09-13 08:23:20 +00004705 return 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004706}
4707
4708/**
4709 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4710 * @vsi: the VSI being configured
4711 * @enabled_tc: TC map to be enabled
4712 *
4713 **/
4714static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4715{
4716 struct net_device *netdev = vsi->netdev;
4717 struct i40e_pf *pf = vsi->back;
4718 struct i40e_hw *hw = &pf->hw;
4719 u8 netdev_tc = 0;
4720 int i;
4721 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4722
4723 if (!netdev)
4724 return;
4725
4726 if (!enabled_tc) {
4727 netdev_reset_tc(netdev);
4728 return;
4729 }
4730
4731 /* Set up actual enabled TCs on the VSI */
4732 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4733 return;
4734
4735 /* set per TC queues for the VSI */
4736 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4737 /* Only set TC queues for enabled tcs
4738 *
4739 * e.g. For a VSI that has TC0 and TC3 enabled the
4740 * enabled_tc bitmap would be 0x00001001; the driver
4741 * will set the numtc for netdev as 2 that will be
4742 * referenced by the netdev layer as TC 0 and 1.
4743 */
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08004744 if (vsi->tc_config.enabled_tc & BIT(i))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004745 netdev_set_tc_queue(netdev,
4746 vsi->tc_config.tc_info[i].netdev_tc,
4747 vsi->tc_config.tc_info[i].qcount,
4748 vsi->tc_config.tc_info[i].qoffset);
4749 }
4750
4751 /* Assign UP2TC map for the VSI */
4752 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4753 /* Get the actual TC# for the UP */
4754 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4755 /* Get the mapped netdev TC# for the UP */
4756 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
4757 netdev_set_prio_tc_map(netdev, i, netdev_tc);
4758 }
4759}
4760
4761/**
4762 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
4763 * @vsi: the VSI being configured
4764 * @ctxt: the ctxt buffer returned from AQ VSI update param command
4765 **/
4766static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4767 struct i40e_vsi_context *ctxt)
4768{
4769 /* copy just the sections touched not the entire info
4770 * since not all sections are valid as returned by
4771 * update vsi params
4772 */
4773 vsi->info.mapping_flags = ctxt->info.mapping_flags;
4774 memcpy(&vsi->info.queue_mapping,
4775 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4776 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4777 sizeof(vsi->info.tc_mapping));
4778}
4779
/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * Returns 0 on success (or when nothing needs to change), negative
 * value on failure.
 *
 * NOTE:
 * It is expected that the VSI queues have been quisced before calling
 * this function.
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	/* Program per-TC bandwidth credits first; the queue map update
	 * below relies on the qs_handles this call caches in vsi->info.
	 */
	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	/* With iWARP enabled, also request the TCP queueing option */
	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
		ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Update vsi tc config failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}
4862
4863/**
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08004864 * i40e_veb_config_tc - Configure TCs for given VEB
4865 * @veb: given VEB
4866 * @enabled_tc: TC bitmap
4867 *
4868 * Configures given TC bitmap for VEB (switching) element
4869 **/
4870int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4871{
4872 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4873 struct i40e_pf *pf = veb->pf;
4874 int ret = 0;
4875 int i;
4876
4877 /* No TCs or already enabled TCs just return */
4878 if (!enabled_tc || veb->enabled_tc == enabled_tc)
4879 return ret;
4880
4881 bw_data.tc_valid_bits = enabled_tc;
4882 /* bw_data.absolute_credits is not set (relative) */
4883
4884 /* Enable ETS TCs with equal BW Share for now */
4885 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08004886 if (enabled_tc & BIT(i))
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08004887 bw_data.tc_bw_share_credits[i] = 1;
4888 }
4889
4890 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4891 &bw_data, NULL);
4892 if (ret) {
4893 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04004894 "VEB bw config failed, err %s aq_err %s\n",
4895 i40e_stat_str(&pf->hw, ret),
4896 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08004897 goto out;
4898 }
4899
4900 /* Update the BW information */
4901 ret = i40e_veb_get_bw_info(veb);
4902 if (ret) {
4903 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04004904 "Failed getting veb bw config, err %s aq_err %s\n",
4905 i40e_stat_str(&pf->hw, ret),
4906 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08004907 }
4908
4909out:
4910 return ret;
4911}
4912
4913#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller would've quiesce all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
#ifdef I40E_FCOE
		 * - For FCoE VSI only enable the TC configured
		 * as per the APP TLV
#endif
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
#ifdef I40E_FCOE
		if (pf->vsi[v]->type == I40E_VSI_FCOE)
			tc_map = i40e_get_fcoe_tc_map(pf);
#endif /* #ifdef I40E_FCOE */

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}
4977
4978/**
Neerav Parikh2fd75f32014-11-12 00:18:20 +00004979 * i40e_resume_port_tx - Resume port Tx
4980 * @pf: PF struct
4981 *
4982 * Resume a port's Tx and issue a PF reset in case of failure to
4983 * resume.
4984 **/
4985static int i40e_resume_port_tx(struct i40e_pf *pf)
4986{
4987 struct i40e_hw *hw = &pf->hw;
4988 int ret;
4989
4990 ret = i40e_aq_resume_port_tx(hw, NULL);
4991 if (ret) {
4992 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04004993 "Resume Port Tx failed, err %s aq_err %s\n",
4994 i40e_stat_str(&pf->hw, ret),
4995 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Neerav Parikh2fd75f32014-11-12 00:18:20 +00004996 /* Schedule PF reset to recover */
4997 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4998 i40e_service_event_schedule(pf);
4999 }
5000
5001 return ret;
5002}
5003
/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure. Sets I40E_FLAG_DCB_CAPABLE /
 * I40E_FLAG_DCB_ENABLED in pf->flags according to what firmware
 * reports. Returns 0 on success or if DCB is unsupported, negative
 * value on query failure.
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
	if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT)
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");

			/* In MFP mode there is nothing more to set up;
			 * fall through to 'out' with err == 0.
			 */
			if (pf->flags & I40E_FLAG_MFP_ENABLED)
				goto out;

		} else {
			/* When status is not DISABLED then DCBX in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC
			 * or explicitly disable if only one TC
			 */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_DCB_ENABLED;
			dev_dbg(&pf->pdev->dev,
				"DCBX offload is supported for this PF.\n");
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "Query for DCB configuration failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return err;
}
5058#endif /* CONFIG_I40E_DCB */
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005059#define SPEED_SIZE 14
5060#define FC_SIZE 8
5061/**
5062 * i40e_print_link_message - print link up or down
5063 * @vsi: the VSI for which link needs a message
5064 */
Matt Jaredc156f852015-08-27 11:42:39 -04005065void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005066{
Shannon Nelsona9165492015-09-03 17:19:00 -04005067 char *speed = "Unknown";
5068 char *fc = "Unknown";
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005069
Matt Jaredc156f852015-08-27 11:42:39 -04005070 if (vsi->current_isup == isup)
5071 return;
5072 vsi->current_isup = isup;
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005073 if (!isup) {
5074 netdev_info(vsi->netdev, "NIC Link is Down\n");
5075 return;
5076 }
5077
Greg Rose148c2d82014-12-11 07:06:27 +00005078 /* Warn user if link speed on NPAR enabled partition is not at
5079 * least 10GB
5080 */
5081 if (vsi->back->hw.func_caps.npar_enable &&
5082 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
5083 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
5084 netdev_warn(vsi->netdev,
5085 "The partition detected link speed that is less than 10Gbps\n");
5086
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005087 switch (vsi->back->hw.phy.link_info.link_speed) {
5088 case I40E_LINK_SPEED_40GB:
Shannon Nelsona9165492015-09-03 17:19:00 -04005089 speed = "40 G";
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005090 break;
Jesse Brandeburgae24b402015-03-27 00:12:09 -07005091 case I40E_LINK_SPEED_20GB:
Shannon Nelsona9165492015-09-03 17:19:00 -04005092 speed = "20 G";
Jesse Brandeburgae24b402015-03-27 00:12:09 -07005093 break;
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005094 case I40E_LINK_SPEED_10GB:
Shannon Nelsona9165492015-09-03 17:19:00 -04005095 speed = "10 G";
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005096 break;
5097 case I40E_LINK_SPEED_1GB:
Shannon Nelsona9165492015-09-03 17:19:00 -04005098 speed = "1000 M";
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005099 break;
Mitch Williams5960d332014-09-13 07:40:47 +00005100 case I40E_LINK_SPEED_100MB:
Shannon Nelsona9165492015-09-03 17:19:00 -04005101 speed = "100 M";
Mitch Williams5960d332014-09-13 07:40:47 +00005102 break;
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005103 default:
5104 break;
5105 }
5106
5107 switch (vsi->back->hw.fc.current_mode) {
5108 case I40E_FC_FULL:
Shannon Nelsona9165492015-09-03 17:19:00 -04005109 fc = "RX/TX";
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005110 break;
5111 case I40E_FC_TX_PAUSE:
Shannon Nelsona9165492015-09-03 17:19:00 -04005112 fc = "TX";
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005113 break;
5114 case I40E_FC_RX_PAUSE:
Shannon Nelsona9165492015-09-03 17:19:00 -04005115 fc = "RX";
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005116 break;
5117 default:
Shannon Nelsona9165492015-09-03 17:19:00 -04005118 fc = "None";
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005119 break;
5120 }
5121
Shannon Nelsona9165492015-09-03 17:19:00 -04005122 netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005123 speed, fc);
5124}
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005125
/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 *
 * Programs interrupts, starts the rings, enables NAPI and IRQs,
 * reports link state to the stack, and (for the FDIR VSI) replays
 * sideband Flow Director filters. Returns 0 on success, negative
 * value if the rings could not be started.
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	/* Program interrupt causes for MSI-X or MSI/legacy mode */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_control_rings(vsi, true);
	if (err)
		return err;

	/* Rings are live: clear DOWN, then enable NAPI and interrupts */
	clear_bit(__I40E_DOWN, &vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	} else if (vsi->netdev) {
		i40e_print_link_message(vsi, false);
		/* need to check for qualified module here*/
		if ((pf->hw.phy.link_info.link_info &
		     I40E_AQ_MEDIA_AVAILABLE) &&
		    (!(pf->hw.phy.link_info.an_info &
		       I40E_AQ_QUALIFIED_MODULE)))
			netdev_err(vsi->netdev,
				   "the driver failed to link because an unqualified module was detected.");
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
		pf->fd_add_err = pf->fd_atr_cnt = 0;
		if (pf->fd_tcp_rule > 0) {
			/* Sideband TCP/IPv4 rules and ATR are mutually
			 * exclusive, so keep ATR auto-disabled while any
			 * such rules exist.
			 */
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
			pf->fd_tcp_rule = 0;
		}
		i40e_fdir_filter_restore(vsi);
	}

	/* On the next run of the service_task, notify any clients of the new
	 * opened netdev
	 */
	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
	i40e_service_event_schedule(pf);

	return 0;
}
5186
5187/**
5188 * i40e_vsi_reinit_locked - Reset the VSI
5189 * @vsi: the VSI being configured
5190 *
5191 * Rebuild the ring structs after some configuration
5192 * has changed, e.g. MTU size.
5193 **/
5194static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
5195{
5196 struct i40e_pf *pf = vsi->back;
5197
5198 WARN_ON(in_interrupt());
5199 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
5200 usleep_range(1000, 2000);
5201 i40e_down(vsi);
5202
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005203 i40e_up(vsi);
5204 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
5205}
5206
/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 *
 * Configures the VSI and, if that succeeds, completes bring-up.
 * Returns 0 on success, negative value on failure.
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int status = i40e_vsi_configure(vsi);

	return status ? status : i40e_up_complete(vsi);
}
5221
5222/**
5223 * i40e_down - Shutdown the connection processing
5224 * @vsi: the VSI being stopped
5225 **/
5226void i40e_down(struct i40e_vsi *vsi)
5227{
5228 int i;
5229
5230 /* It is assumed that the caller of this function
5231 * sets the vsi->state __I40E_DOWN bit.
5232 */
5233 if (vsi->netdev) {
5234 netif_carrier_off(vsi->netdev);
5235 netif_tx_disable(vsi->netdev);
5236 }
5237 i40e_vsi_disable_irq(vsi);
5238 i40e_vsi_control_rings(vsi, false);
5239 i40e_napi_disable_all(vsi);
5240
5241 for (i = 0; i < vsi->num_queue_pairs; i++) {
Alexander Duyck9f65e152013-09-28 06:00:58 +00005242 i40e_clean_tx_ring(vsi->tx_rings[i]);
5243 i40e_clean_rx_ring(vsi->rx_rings[i]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005244 }
Catherine Sullivanf980d442016-05-16 10:26:34 -07005245
5246 i40e_notify_client_of_netdev_close(vsi, false);
5247
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005248}
5249
5250/**
5251 * i40e_setup_tc - configure multiple traffic classes
5252 * @netdev: net device to configure
5253 * @tc: number of traffic classes to enable
5254 **/
5255static int i40e_setup_tc(struct net_device *netdev, u8 tc)
5256{
5257 struct i40e_netdev_priv *np = netdev_priv(netdev);
5258 struct i40e_vsi *vsi = np->vsi;
5259 struct i40e_pf *pf = vsi->back;
5260 u8 enabled_tc = 0;
5261 int ret = -EINVAL;
5262 int i;
5263
5264 /* Check if DCB enabled to continue */
5265 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5266 netdev_info(netdev, "DCB is not enabled for adapter\n");
5267 goto exit;
5268 }
5269
5270 /* Check if MFP enabled */
5271 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5272 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
5273 goto exit;
5274 }
5275
5276 /* Check whether tc count is within enabled limit */
5277 if (tc > i40e_pf_get_num_tc(pf)) {
5278 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
5279 goto exit;
5280 }
5281
5282 /* Generate TC map for number of tc requested */
5283 for (i = 0; i < tc; i++)
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08005284 enabled_tc |= BIT(i);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005285
5286 /* Requesting same TC configuration as already enabled */
5287 if (enabled_tc == vsi->tc_config.enabled_tc)
5288 return 0;
5289
5290 /* Quiesce VSI queues */
5291 i40e_quiesce_vsi(vsi);
5292
5293 /* Configure VSI for enabled TCs */
5294 ret = i40e_vsi_config_tc(vsi, enabled_tc);
5295 if (ret) {
5296 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
5297 vsi->seid);
5298 goto exit;
5299 }
5300
5301 /* Unquiesce VSI */
5302 i40e_unquiesce_vsi(vsi);
5303
5304exit:
5305 return ret;
5306}
5307
/**
 * __i40e_setup_tc - ndo_setup_tc entry point
 * @netdev: net device to configure
 * @handle: tc handle; only TC_H_ROOT is accepted
 * @proto: protocol (unused here)
 * @tc: offload request from the stack
 *
 * Only mqprio requests rooted at TC_H_ROOT are supported; anything
 * else is rejected with -EINVAL. Exported (non-static) only when
 * FCoE support is built in, since the FCoE code references it.
 **/
#ifdef I40E_FCOE
int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
		    struct tc_to_netdev *tc)
#else
static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
			   struct tc_to_netdev *tc)
#endif
{
	if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;
	return i40e_setup_tc(netdev, tc->tc);
}
5320
/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, &pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, &pf->state))
		return -EBUSY;

	/* start with carrier off; link state is updated by the link
	 * event/watchdog path once the hardware reports link
	 */
	netif_carrier_off(netdev);

	/* i40e_vsi_open() allocates rings/IRQs and brings the VSI up */
	err = i40e_vsi_open(vsi);
	if (err)
		return err;

	/* configure global TSO hardware offload settings: these registers
	 * mask which TCP flags may be replicated/forced on TSO segments
	 */
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN |
						       TCP_FLAG_CWR) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

	/* ask the stack to replay UDP tunnel port notifications,
	 * presumably so the HW offload port table can be repopulated
	 * after open — NOTE(review): confirm against udp_tunnel core
	 */
	udp_tunnel_get_rx_info(netdev);

	return 0;
}
5363
/**
 * i40e_vsi_open - finish bring-up of a VSI
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI: allocate Tx/Rx descriptor
 * resources, configure the VSI, request its IRQ(s), and complete
 * bring-up.  On any failure the partially-acquired resources are
 * released via the goto cleanup ladder below.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	char int_name[I40E_INT_NAME_STR_LEN];
	int err;

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	if (vsi->netdev) {
		/* name the interrupt after the driver and netdev */
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

		err = netif_set_real_num_rx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

	} else if (vsi->type == I40E_VSI_FDIR) {
		/* Flow Director VSI has no netdev; name the IRQ from the
		 * PCI device instead
		 */
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));
		err = i40e_vsi_request_irq(vsi, int_name);

	} else {
		/* only netdev-backed and FDIR VSIs may be opened here */
		err = -EINVAL;
		goto err_setup_rx;
	}

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	/* failure to open the main LAN VSI is fatal enough to warrant
	 * a full PF reset to get back to a known state
	 */
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));

	return err;
}
5438
5439/**
Joseph Gasparakis17a73f62014-02-12 01:45:30 +00005440 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
Jeff Kirsherb40c82e62015-02-27 09:18:34 +00005441 * @pf: Pointer to PF
Joseph Gasparakis17a73f62014-02-12 01:45:30 +00005442 *
5443 * This function destroys the hlist where all the Flow Director
5444 * filters were saved.
5445 **/
5446static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5447{
5448 struct i40e_fdir_filter *filter;
5449 struct hlist_node *node2;
5450
5451 hlist_for_each_entry_safe(filter, node2,
5452 &pf->fdir_filter_list, fdir_node) {
5453 hlist_del(&filter->fdir_node);
5454 kfree(filter);
5455 }
5456 pf->fdir_pf_active_filters = 0;
5457}
5458
5459/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005460 * i40e_close - Disables a network interface
5461 * @netdev: network interface device structure
5462 *
5463 * The close entry point is called when an interface is de-activated
5464 * by the OS. The hardware is still under the driver's control, but
5465 * this netdev interface is disabled.
5466 *
5467 * Returns 0, this is not allowed to fail
5468 **/
Vasu Dev38e00432014-08-01 13:27:03 -07005469int i40e_close(struct net_device *netdev)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005470{
5471 struct i40e_netdev_priv *np = netdev_priv(netdev);
5472 struct i40e_vsi *vsi = np->vsi;
5473
Shannon Nelson90ef8d42014-03-14 07:32:26 +00005474 i40e_vsi_close(vsi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005475
5476 return 0;
5477}
5478
/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 *
 * Only the single biggest reset indicated by @reset_flags is
 * performed; the checks below are ordered from most to least
 * disruptive.  Must not be called from interrupt context.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
{
	u32 val;

	WARN_ON(in_interrupt());


	/* do the biggest reset indicated */
	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset
		 *
		 * This will start the chip's countdown to the actual full
		 * chip reset event, and a warning interrupt to be sent
		 * to all PFs, including the requestor. Our handler
		 * for the warning interrupt will deal with the shutdown
		 * and recovery of the switch setup.
		 */
		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset
		 *
		 * Same as Global Reset, except does *not* include the MAC/PHY
		 */
		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {

		/* Request a PF Reset
		 *
		 * Resets only the PF-specific registers
		 *
		 * This goes directly to the tear-down and rebuild of
		 * the switch, since we need to do all the recovery as
		 * for the Core Reset.
		 */
		dev_dbg(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf);

	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
				i40e_vsi_reinit_locked(pf->vsi[v]);
				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
			}
		}
	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
		int v;

		/* Find the VSI(s) that needs to be brought down */
		dev_info(&pf->pdev->dev, "VSI down requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
				set_bit(__I40E_DOWN, &vsi->state);
				i40e_down(vsi);
				clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
			}
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
	}
}
5571
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005572#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
 * @pf: board private structure
 * @old_cfg: current DCB config
 * @new_cfg: new DCB config
 *
 * Compares ETS, PFC, and APP table sections of the two DCBX
 * configurations and returns true if the differences require the
 * driver to reconfigure (quiesce/rebuild) its VSIs.
 **/
bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
			    struct i40e_dcbx_config *old_cfg,
			    struct i40e_dcbx_config *new_cfg)
{
	bool need_reconfig = false;

	/* Check if ETS configuration has changed */
	if (memcmp(&new_cfg->etscfg,
		   &old_cfg->etscfg,
		   sizeof(new_cfg->etscfg))) {
		/* If Priority Table has changed reconfig is needed */
		if (memcmp(&new_cfg->etscfg.prioritytable,
			   &old_cfg->etscfg.prioritytable,
			   sizeof(new_cfg->etscfg.prioritytable))) {
			need_reconfig = true;
			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
		}

		/* NOTE(review): changes to the TC BW table and TSA table
		 * below are only logged and do NOT set need_reconfig —
		 * verify this is intentional rather than an oversight.
		 */
		if (memcmp(&new_cfg->etscfg.tcbwtable,
			   &old_cfg->etscfg.tcbwtable,
			   sizeof(new_cfg->etscfg.tcbwtable)))
			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");

		if (memcmp(&new_cfg->etscfg.tsatable,
			   &old_cfg->etscfg.tsatable,
			   sizeof(new_cfg->etscfg.tsatable)))
			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
	}

	/* Check if PFC configuration has changed */
	if (memcmp(&new_cfg->pfc,
		   &old_cfg->pfc,
		   sizeof(new_cfg->pfc))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
	}

	/* Check if APP Table has changed */
	if (memcmp(&new_cfg->app,
		   &old_cfg->app,
		   sizeof(new_cfg->app))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
	}

	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
	return need_reconfig;
}
5627
/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Refreshes the cached DCBX configuration from firmware when an LLDP
 * MIB-change event arrives, and if the local configuration changed in
 * a way that matters (per i40e_dcb_need_reconfig()), quiesces all
 * VSIs and reconfigures DCB across the device.
 *
 * Returns 0, or a nonzero status from the admin-queue/DCB helpers.
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
				  struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lldp_get_mib *mib =
		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_dcbx_config tmp_dcbx_cfg;
	bool need_reconfig = false;
	int ret = 0;
	u8 type;

	/* Not DCB capable or capability disabled */
	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
		return ret;

	/* Ignore if event is not for Nearest Bridge */
	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
		return ret;

	/* Check MIB Type and return if event for Remote MIB update */
	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
	dev_dbg(&pf->pdev->dev,
		"LLDP event mib type %s\n", type ? "remote" : "local");
	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
		/* Update the remote cached instance and return */
		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				&hw->remote_dcbx_config);
		goto exit;
	}

	/* Store the old configuration */
	tmp_dcbx_cfg = hw->local_dcbx_config;

	/* Reset the old DCBx configuration data */
	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
	/* Get updated DCBX data from firmware */
	ret = i40e_get_dcb_config(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto exit;
	}

	/* No change detected in DCBX configs */
	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
		    sizeof(tmp_dcbx_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}

	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
					       &hw->local_dcbx_config);

	/* drop dcbnl APP entries belonging to the old configuration */
	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);

	if (!need_reconfig)
		goto exit;

	/* Enable DCB tagging only when more than one TC */
	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
		pf->flags |= I40E_FLAG_DCB_ENABLED;
	else
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;

	set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
	/* Reconfiguration needed quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Changes in configuration update VEB/VSI */
	i40e_dcb_reconfigure(pf);

	ret = i40e_resume_port_tx(pf);

	clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
	/* In case of error no point in resuming VSIs */
	if (ret)
		goto exit;

	/* Wait for the PF's queues to be disabled */
	ret = i40e_pf_wait_queues_disabled(pf);
	if (ret) {
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		i40e_service_event_schedule(pf);
	} else {
		i40e_pf_unquiesce_all_vsi(pf);
		/* Notify the client for the DCB changes */
		i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
	}

exit:
	return ret;
}
5732#endif /* CONFIG_I40E_DCB */
5733
/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * Wraps i40e_do_reset() in the rtnl lock so reset requests that
 * originate outside the normal locked paths (e.g. from userland)
 * are serialized against other netdev operations.
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags);
	rtnl_unlock();
}
5746
/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues.  If the overflowing queue belongs to a VF, that VF is
 * notified and then reset; PF-owned queues are only logged here.
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		/* QTX_CTL holds an absolute VF index; rebase it to this
		 * PF's first VF before indexing pf->vf[]
		 */
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}
5782
/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 *
 * Clears the SERVICE_SCHED bit so the service task can be scheduled
 * again; warns if called when no service event was actually scheduled.
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
	WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}
5795
5796/**
Anjali Singhai Jain12957382014-06-04 04:22:47 +00005797 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5798 * @pf: board private structure
5799 **/
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005800u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
Anjali Singhai Jain12957382014-06-04 04:22:47 +00005801{
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005802 u32 val, fcnt_prog;
Anjali Singhai Jain12957382014-06-04 04:22:47 +00005803
5804 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5805 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5806 return fcnt_prog;
5807}
5808
5809/**
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005810 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005811 * @pf: board private structure
5812 **/
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005813u32 i40e_get_current_fd_count(struct i40e_pf *pf)
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005814{
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005815 u32 val, fcnt_prog;
5816
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005817 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5818 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5819 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5820 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5821 return fcnt_prog;
5822}
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005823
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005824/**
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005825 * i40e_get_global_fd_count - Get total FD filters programmed on device
5826 * @pf: board private structure
5827 **/
5828u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5829{
5830 u32 val, fcnt_prog;
5831
5832 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5833 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5834 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5835 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
5836 return fcnt_prog;
5837}
5838
/**
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
 * @pf: board private structure
 *
 * If Flow Director Sideband (SB) or ATR were auto-disabled because the
 * filter table filled up, re-enable them once enough space is free
 * again.  Also deletes any sideband filter the hardware reported it
 * failed to add (tracked in pf->fd_inv).
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	u32 fcnt_prog, fcnt_avail;
	struct hlist_node *node;

	/* a table flush is in progress; it will handle re-enabling */
	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
		return;

	/* Check if, FD SB or ATR was auto disabled and if there is enough room
	 * to re-enable
	 */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}

	/* Wait for some more space to be available to turn on ATR. We also
	 * must check that no existing ntuple rules for TCP are in effect
	 */
	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->fd_tcp_rule == 0)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
		}
	}

	/* if hw had a problem adding a filter, delete it */
	if (pf->fd_inv > 0) {
		hlist_for_each_entry_safe(filter, node,
					  &pf->fdir_filter_list, fdir_node) {
			if (filter->fd_id == pf->fd_inv) {
				hlist_del(&filter->fdir_node);
				kfree(filter);
				pf->fdir_pf_active_filters--;
			}
		}
	}
}
5893
#define I40E_MIN_FD_FLUSH_INTERVAL 10
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 *
 * Clears the entire hardware Flow Director filter table and replays
 * the saved sideband (ntuple) filters.  Rate-limited to at most once
 * per I40E_MIN_FD_FLUSH_INTERVAL seconds; if flushes are happening too
 * frequently while the table is mostly sideband rules, ATR is left
 * disabled after the flush.
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	/* rate-limit: skip if the last flush was too recent */
	if (!time_after(jiffies, pf->fd_flush_timestamp +
			(I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;

	/* If the flush is happening too quick and we have mostly SB rules we
	 * should not re-enable ATR for some time.
	 */
	min_flush_time = pf->fd_flush_timestamp +
			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

	if (!(time_after(jiffies, min_flush_time)) &&
	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
		disable_atr = true;
	}

	pf->fd_flush_timestamp = jiffies;
	/* mark ATR auto-disabled while the table is being flushed */
	pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
	/* flush all filters */
	wr32(&pf->hw, I40E_PFQF_CTL_1,
	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	i40e_flush(&pf->hw);
	pf->fd_flush_cnt++;
	pf->fd_add_err = 0;
	do {
		/* Check FD flush status every 5-6msec */
		usleep_range(5000, 6000);
		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	} while (flush_wait_retry--);
	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
	} else {
		/* replay sideband filters */
		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
		if (!disable_atr)
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}
}
5953
5954/**
5955 * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
5956 * @pf: board private structure
5957 **/
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005958u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005959{
5960 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
5961}
5962
5963/* We can see up to 256 filter programming desc in transit if the filters are
5964 * being applied really fast; before we see the first
5965 * filter miss error on Rx queue 0. Accumulating enough error messages before
5966 * reacting will make sure we don't cause flush too often.
5967 */
5968#define I40E_MAX_FD_PROGRAM_ERROR 256
5969
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005970/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005971 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
5972 * @pf: board private structure
5973 **/
5974static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
5975{
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005976
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005977 /* if interface is down do nothing */
5978 if (test_bit(__I40E_DOWN, &pf->state))
5979 return;
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005980
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005981 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005982 i40e_fdir_flush_and_replay(pf);
5983
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005984 i40e_fdir_check_and_reenable(pf);
5985
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005986}
5987
/**
 * i40e_vsi_link_event - notify VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 *
 * Propagates a link state change to the netdev stack for VSI types
 * that have a registered netdev; all other VSI types are ignored.
 **/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
	if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
		return;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
#endif
		if (!vsi->netdev || !vsi->netdev_registered)
			break;

		/* mirror the link state into the netdev carrier state and
		 * start/stop the Tx queues accordingly
		 */
		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
		break;

	case I40E_VSI_SRIOV:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_CTRL:
	case I40E_VSI_IWARP:
	case I40E_VSI_MIRROR:
	default:
		/* there is no notification for other VSIs */
		break;
	}
}
6025
6026/**
6027 * i40e_veb_link_event - notify elements on the veb of a link event
6028 * @veb: veb to be notified
6029 * @link_up: link up or down
6030 **/
6031static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
6032{
6033 struct i40e_pf *pf;
6034 int i;
6035
6036 if (!veb || !veb->pf)
6037 return;
6038 pf = veb->pf;
6039
6040 /* depth first... */
6041 for (i = 0; i < I40E_MAX_VEB; i++)
6042 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
6043 i40e_veb_link_event(pf->veb[i], link_up);
6044
6045 /* ... now the local VSIs */
Mitch Williams505682c2014-05-20 08:01:37 +00006046 for (i = 0; i < pf->num_alloc_vsi; i++)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006047 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
6048 i40e_vsi_link_event(pf->vsi[i], link_up);
6049}
6050
6051/**
6052 * i40e_link_event - Update netif_carrier status
6053 * @pf: board private structure
6054 **/
6055static void i40e_link_event(struct i40e_pf *pf)
6056{
Mitch Williams320684c2014-10-17 03:14:43 +00006057 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
Catherine Sullivanfef59dd2014-12-11 07:06:33 +00006058 u8 new_link_speed, old_link_speed;
Jesse Brandeburga72a5abc2015-08-26 15:14:19 -04006059 i40e_status status;
6060 bool new_link, old_link;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006061
Catherine Sullivan1f9610e2015-10-21 19:47:09 -04006062 /* save off old link status information */
6063 pf->hw.phy.link_info_old = pf->hw.phy.link_info;
6064
Jesse Brandeburg1e701e02014-09-13 07:40:42 +00006065 /* set this to force the get_link_status call to refresh state */
6066 pf->hw.phy.get_link_info = true;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006067
Jesse Brandeburg1e701e02014-09-13 07:40:42 +00006068 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
Jesse Brandeburga72a5abc2015-08-26 15:14:19 -04006069
6070 status = i40e_get_link_status(&pf->hw, &new_link);
6071 if (status) {
6072 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
6073 status);
6074 return;
6075 }
6076
Catherine Sullivanfef59dd2014-12-11 07:06:33 +00006077 old_link_speed = pf->hw.phy.link_info_old.link_speed;
6078 new_link_speed = pf->hw.phy.link_info.link_speed;
Jesse Brandeburg1e701e02014-09-13 07:40:42 +00006079
6080 if (new_link == old_link &&
Catherine Sullivanfef59dd2014-12-11 07:06:33 +00006081 new_link_speed == old_link_speed &&
Mitch Williams320684c2014-10-17 03:14:43 +00006082 (test_bit(__I40E_DOWN, &vsi->state) ||
6083 new_link == netif_carrier_ok(vsi->netdev)))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006084 return;
Mitch Williams320684c2014-10-17 03:14:43 +00006085
6086 if (!test_bit(__I40E_DOWN, &vsi->state))
6087 i40e_print_link_message(vsi, new_link);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006088
6089 /* Notify the base of the switch tree connected to
6090 * the link. Floating VEBs are not notified.
6091 */
6092 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
6093 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
6094 else
Mitch Williams320684c2014-10-17 03:14:43 +00006095 i40e_vsi_link_event(vsi, new_link);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006096
6097 if (pf->vf)
6098 i40e_vc_notify_link_state(pf);
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00006099
6100 if (pf->flags & I40E_FLAG_PTP)
6101 i40e_ptp_set_increment(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006102}
6103
6104/**
Shannon Nelson21536712014-10-25 10:35:25 +00006105 * i40e_watchdog_subtask - periodic checks not using event driven response
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006106 * @pf: board private structure
6107 **/
6108static void i40e_watchdog_subtask(struct i40e_pf *pf)
6109{
6110 int i;
6111
6112 /* if interface is down do nothing */
6113 if (test_bit(__I40E_DOWN, &pf->state) ||
6114 test_bit(__I40E_CONFIG_BUSY, &pf->state))
6115 return;
6116
Shannon Nelson21536712014-10-25 10:35:25 +00006117 /* make sure we don't do these things too often */
6118 if (time_before(jiffies, (pf->service_timer_previous +
6119 pf->service_timer_period)))
6120 return;
6121 pf->service_timer_previous = jiffies;
6122
Shannon Nelson9ac77262015-08-27 11:42:40 -04006123 if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
6124 i40e_link_event(pf);
Shannon Nelson21536712014-10-25 10:35:25 +00006125
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006126 /* Update the stats for active netdevs so the network stack
6127 * can look at updated numbers whenever it cares to
6128 */
Mitch Williams505682c2014-05-20 08:01:37 +00006129 for (i = 0; i < pf->num_alloc_vsi; i++)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006130 if (pf->vsi[i] && pf->vsi[i]->netdev)
6131 i40e_update_stats(pf->vsi[i]);
6132
Anjali Singhai Jaind1a8d272015-07-23 16:54:40 -04006133 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
6134 /* Update the stats for the active switching components */
6135 for (i = 0; i < I40E_MAX_VEB; i++)
6136 if (pf->veb[i])
6137 i40e_update_veb_stats(pf->veb[i]);
6138 }
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00006139
6140 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006141}
6142
6143/**
6144 * i40e_reset_subtask - Set up for resetting the device and driver
6145 * @pf: board private structure
6146 **/
6147static void i40e_reset_subtask(struct i40e_pf *pf)
6148{
6149 u32 reset_flags = 0;
6150
Anjali Singhai Jain233261862013-11-26 10:49:22 +00006151 rtnl_lock();
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006152 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08006153 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006154 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
6155 }
6156 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08006157 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006158 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6159 }
6160 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08006161 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006162 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
6163 }
6164 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08006165 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006166 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
6167 }
Neerav Parikhb5d06f02014-06-03 23:50:17 +00006168 if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08006169 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
Neerav Parikhb5d06f02014-06-03 23:50:17 +00006170 clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
6171 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006172
6173 /* If there's a recovery already waiting, it takes
6174 * precedence before starting a new reset sequence.
6175 */
6176 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
6177 i40e_handle_reset_warning(pf);
Anjali Singhai Jain233261862013-11-26 10:49:22 +00006178 goto unlock;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006179 }
6180
6181 /* If we're already down or resetting, just bail */
6182 if (reset_flags &&
6183 !test_bit(__I40E_DOWN, &pf->state) &&
6184 !test_bit(__I40E_CONFIG_BUSY, &pf->state))
6185 i40e_do_reset(pf, reset_flags);
Anjali Singhai Jain233261862013-11-26 10:49:22 +00006186
6187unlock:
6188 rtnl_unlock();
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006189}
6190
6191/**
6192 * i40e_handle_link_event - Handle link event
6193 * @pf: board private structure
6194 * @e: event info posted on ARQ
6195 **/
6196static void i40e_handle_link_event(struct i40e_pf *pf,
6197 struct i40e_arq_event_info *e)
6198{
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006199 struct i40e_aqc_get_link_status *status =
6200 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006201
Jesse Brandeburg1e701e02014-09-13 07:40:42 +00006202 /* Do a new status request to re-enable LSE reporting
6203 * and load new status information into the hw struct
6204 * This completely ignores any state information
6205 * in the ARQ event info, instead choosing to always
6206 * issue the AQ update link status command.
6207 */
6208 i40e_link_event(pf);
6209
Carolyn Wyborny7b592f62014-07-10 07:58:19 +00006210 /* check for unqualified module, if link is down */
6211 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
6212 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
6213 (!(status->link_info & I40E_AQ_LINK_UP)))
6214 dev_err(&pf->pdev->dev,
6215 "The driver failed to link because an unqualified module was detected.\n");
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006216}
6217
/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 *
 * Checks and clears any error indications latched in the ARQ/ASQ length
 * registers, then drains pending receive-queue (ARQ) events and
 * dispatches them by opcode.  Finally re-enables the AdminQ interrupt
 * cause so further events can raise interrupts again.
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

	/* Do not run clean AQ when PF reset fails */
	if (test_bit(__I40E_RESET_FAILED, &pf->state))
		return;

	/* check for error indications on the receive (ARQ) side; each
	 * sticky error bit is logged (when AQ debugging is on) and cleared
	 */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
		/* overflow counter is reported via ethtool stats */
		pf->arq_overflows++;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	/* only write the register back when a bit was actually cleared */
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	/* same dance for the send (ASQ) side */
	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	/* one message buffer is reused for every event in the loop below */
	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	/* drain the ARQ, bounded by adminq_work_limit events per pass */
	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			/* mailbox message from a VF */
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_len);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			/* DCB reconfiguration may touch netdevs: needs RTNL */
			rtnl_lock();
			ret = i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		case i40e_aqc_opc_nvm_erase:
		case i40e_aqc_opc_nvm_update:
		case i40e_aqc_opc_oem_post_update:
			/* NVM completions are interesting only when debugging */
			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
				   "ARQ NVM operation 0x%04x completed\n",
				   opcode);
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ: Unknown event 0x%04x ignored\n",
				 opcode);
			break;
		}
	} while (pending && (i++ < pf->adminq_work_limit));

	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);
	i40e_flush(hw);

	kfree(event.msg_buf);
}
6345
6346/**
Shannon Nelson4eb3f762014-03-06 08:59:58 +00006347 * i40e_verify_eeprom - make sure eeprom is good to use
6348 * @pf: board private structure
6349 **/
6350static void i40e_verify_eeprom(struct i40e_pf *pf)
6351{
6352 int err;
6353
6354 err = i40e_diag_eeprom_test(&pf->hw);
6355 if (err) {
6356 /* retry in case of garbage read */
6357 err = i40e_diag_eeprom_test(&pf->hw);
6358 if (err) {
6359 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6360 err);
6361 set_bit(__I40E_BAD_EEPROM, &pf->state);
6362 }
6363 }
6364
6365 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
6366 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6367 clear_bit(__I40E_BAD_EEPROM, &pf->state);
6368 }
6369}
6370
6371/**
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006372 * i40e_enable_pf_switch_lb
Jeff Kirsherb40c82e62015-02-27 09:18:34 +00006373 * @pf: pointer to the PF structure
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006374 *
6375 * enable switch loop back or die - no point in a return value
6376 **/
6377static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6378{
6379 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6380 struct i40e_vsi_context ctxt;
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006381 int ret;
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006382
6383 ctxt.seid = pf->main_vsi_seid;
6384 ctxt.pf_num = pf->hw.pf_id;
6385 ctxt.vf_num = 0;
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006386 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6387 if (ret) {
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006388 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006389 "couldn't get PF vsi config, err %s aq_err %s\n",
6390 i40e_stat_str(&pf->hw, ret),
6391 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006392 return;
6393 }
6394 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6395 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6396 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6397
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006398 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6399 if (ret) {
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006400 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006401 "update vsi switch failed, err %s aq_err %s\n",
6402 i40e_stat_str(&pf->hw, ret),
6403 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006404 }
6405}
6406
6407/**
6408 * i40e_disable_pf_switch_lb
Jeff Kirsherb40c82e62015-02-27 09:18:34 +00006409 * @pf: pointer to the PF structure
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006410 *
6411 * disable switch loop back or die - no point in a return value
6412 **/
6413static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6414{
6415 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6416 struct i40e_vsi_context ctxt;
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006417 int ret;
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006418
6419 ctxt.seid = pf->main_vsi_seid;
6420 ctxt.pf_num = pf->hw.pf_id;
6421 ctxt.vf_num = 0;
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006422 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6423 if (ret) {
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006424 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006425 "couldn't get PF vsi config, err %s aq_err %s\n",
6426 i40e_stat_str(&pf->hw, ret),
6427 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006428 return;
6429 }
6430 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6431 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6432 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6433
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006434 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6435 if (ret) {
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006436 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006437 "update vsi switch failed, err %s aq_err %s\n",
6438 i40e_stat_str(&pf->hw, ret),
6439 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006440 }
6441}
6442
6443/**
Neerav Parikh51616012015-02-06 08:52:14 +00006444 * i40e_config_bridge_mode - Configure the HW bridge mode
6445 * @veb: pointer to the bridge instance
6446 *
6447 * Configure the loop back mode for the LAN VSI that is downlink to the
6448 * specified HW bridge instance. It is expected this function is called
6449 * when a new HW bridge is instantiated.
6450 **/
6451static void i40e_config_bridge_mode(struct i40e_veb *veb)
6452{
6453 struct i40e_pf *pf = veb->pf;
6454
Shannon Nelson6dec1012015-09-28 14:12:30 -04006455 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
6456 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6457 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
Neerav Parikh51616012015-02-06 08:52:14 +00006458 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6459 i40e_disable_pf_switch_lb(pf);
6460 else
6461 i40e_enable_pf_switch_lb(pf);
6462}
6463
/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB. We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 *
 * Returns 0 on success, -ENOENT if the owner VSI is missing, or the
 * error from the first failing add/rebuild step.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	/* a non-LAN owner VSI borrows the LAN VSI's uplink until this VEB
	 * exists in the switch again
	 */
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of veb_idx %d owner VSI failed: %d\n",
			 veb->idx, ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	/* bridge mode is a PF-wide setting; restore it on the fresh VEB */
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		veb->bridge_mode = BRIDGE_MODE_VEB;
	else
		veb->bridge_mode = BRIDGE_MODE_VEPA;
	i40e_config_bridge_mode(veb);

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];

			/* reattach to the freshly created VEB's seid */
			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}
6550
/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 *
 * Queries the firmware for function capabilities, growing the response
 * buffer as the firmware asks for more room, and applies a num_vsis
 * workaround for revision-0 (A0) silicon.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENODEV when
 * the firmware rejects the discovery request.
 **/
static int i40e_get_capabilities(struct i40e_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	/* initial guess: room for 40 capability records */
	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
						    &data_size,
						    i40e_aqc_opc_list_func_capabilities,
						    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer; data_size tells us how
			 * much the firmware actually wanted
			 */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENODEV;
		}
		/* NOTE(review): if err were nonzero while asq_last_status is
		 * I40E_AQ_RC_OK this would retry with an unchanged buf_len;
		 * presumably the AQ layer always sets a non-OK status on
		 * failure - confirm against i40e_aq_discover_capabilities.
		 */
	} while (err);

	if (pf->hw.debug_mask & I40E_DEBUG_USER)
		dev_info(&pf->pdev->dev,
			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
			 pf->hw.func_caps.num_msix_vectors,
			 pf->hw.func_caps.num_msix_vectors_vf,
			 pf->hw.func_caps.fd_filters_guaranteed,
			 pf->hw.func_caps.fd_filters_best_effort,
			 pf->hw.func_caps.num_tx_qp,
			 pf->hw.func_caps.num_vsis);

/* minimum VSIs needed: 1 LAN + optional FCoE + one per VF */
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		     + pf->hw.func_caps.num_vfs)
	if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
		/* A0 silicon may under-report num_vsis; bump to the minimum */
		dev_info(&pf->pdev->dev,
			 "got num_vsis %d, setting num_vsis to %d\n",
			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
	}

	return 0;
}
6611
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006612static int i40e_vsi_clear(struct i40e_vsi *vsi);
6613
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006614/**
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006615 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006616 * @pf: board private structure
6617 **/
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006618static void i40e_fdir_sb_setup(struct i40e_pf *pf)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006619{
6620 struct i40e_vsi *vsi;
Shannon Nelson8a9eb7d2014-03-14 07:32:28 +00006621 int i;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006622
Jesse Brandeburg407e0632014-06-03 23:50:12 +00006623 /* quick workaround for an NVM issue that leaves a critical register
6624 * uninitialized
6625 */
6626 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6627 static const u32 hkey[] = {
6628 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6629 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6630 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6631 0x95b3a76d};
6632
6633 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6634 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6635 }
6636
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006637 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006638 return;
6639
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006640 /* find existing VSI and see if it needs configuring */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006641 vsi = NULL;
Mitch Williams505682c2014-05-20 08:01:37 +00006642 for (i = 0; i < pf->num_alloc_vsi; i++) {
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006643 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006644 vsi = pf->vsi[i];
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006645 break;
6646 }
6647 }
6648
6649 /* create a new VSI if none exists */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006650 if (!vsi) {
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006651 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6652 pf->vsi[pf->lan_vsi]->seid, 0);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006653 if (!vsi) {
6654 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
Shannon Nelson8a9eb7d2014-03-14 07:32:28 +00006655 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6656 return;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006657 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006658 }
Shannon Nelson8a9eb7d2014-03-14 07:32:28 +00006659
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006660 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006661}
6662
6663/**
6664 * i40e_fdir_teardown - release the Flow Director resources
6665 * @pf: board private structure
6666 **/
6667static void i40e_fdir_teardown(struct i40e_pf *pf)
6668{
6669 int i;
6670
Joseph Gasparakis17a73f62014-02-12 01:45:30 +00006671 i40e_fdir_filter_exit(pf);
Mitch Williams505682c2014-05-20 08:01:37 +00006672 for (i = 0; i < pf->num_alloc_vsi; i++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006673 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6674 i40e_vsi_release(pf->vsi[i]);
6675 break;
6676 }
6677 }
6678}
6679
/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for PF Reset.
 *
 * Ordering matters here: VFs are warned while the AQ still works, the
 * VSIs are quiesced before their seids are invalidated, and the AdminQ
 * is shut down before the HMC backing it.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
	/* bail if a reset/recovery cycle is already in progress */
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
		return;
	/* only tell the VFs if the AQ can still deliver the message */
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	/* the HW seids become stale across the reset; zero them so the
	 * rebuild path re-learns them from the firmware
	 */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}
}
6718
6719/**
Jesse Brandeburg44033fa2014-04-23 04:50:15 +00006720 * i40e_send_version - update firmware with driver version
6721 * @pf: PF struct
6722 */
6723static void i40e_send_version(struct i40e_pf *pf)
6724{
6725 struct i40e_driver_version dv;
6726
6727 dv.major_version = DRV_VERSION_MAJOR;
6728 dv.minor_version = DRV_VERSION_MINOR;
6729 dv.build_version = DRV_VERSION_BUILD;
6730 dv.subbuild_version = 0;
Rickard Strandqvist35a7d802014-07-29 09:26:25 +00006731 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
Jesse Brandeburg44033fa2014-04-23 04:50:15 +00006732 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6733}
6734
/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 *
 * Issues the PF reset and then brings everything back up in order:
 * AdminQ, HMC, DCB/FCoE config, the switch topology (VEBs then VSIs),
 * the misc interrupt, and finally the VFs.  Callers are expected to
 * have quiesced the device first (see i40e_handle_reset_warning(),
 * which runs i40e_prep_for_reset() before this).  On a fatal error the
 * function jumps to the cleanup labels so the RECOVERY_PENDING bit is
 * always cleared on exit.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
	struct i40e_hw *hw = &pf->hw;
	u8 set_fc_aq_fail = 0;
	i40e_status ret;
	u32 val;
	u32 v;

	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, &pf->state);
		goto clear_recovery;
	}
	/* reset succeeded; count it for ethtool/debug statistics */
	pf->pfr_count++;

	/* if the driver is going down anyway, skip the rebuild */
	if (test_bit(__I40E_DOWN, &pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
		i40e_verify_eeprom(pf);

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf);
	if (ret)
		goto end_core_reset;

	/* re-create the HMC context with the same queue counts as before */
	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */
#ifdef I40E_FCOE
	i40e_init_pf_fcoe(pf);

#endif
	/* do basic switch setup */
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_core_reset;

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure our flow control settings are restored */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * try to recover minimal use by getting the basic PF VSI working.
	 */
	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			/* uplink_seid == 0 marks an orphaned VEB */
			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					pf->vsi[pf->lan_vsi]->uplink_seid
								= pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	/* reached either when there was no VEB to begin with, or when the
	 * Main VEB rebuild above fell back to a direct MAC uplink
	 */
	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_core_reset;
		}
	}

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
#define I40E_REG_MSS          0x000E64DC
#define I40E_REG_MSS_MIN_MASK 0x3FF0000
#define I40E_64BYTE_MSS       0x400000
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	/* some HW/FW combinations need autoneg kicked after a reset;
	 * gated by the RESTART_AUTONEG flag set elsewhere in the driver
	 */
	if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
		msleep(75);
		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (ret)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	if (pf->num_alloc_vfs) {
		for (v = 0; v < pf->num_alloc_vfs; v++)
			i40e_reset_vf(&pf->vf[v], true);
	}

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

end_core_reset:
	clear_bit(__I40E_RESET_FAILED, &pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
}
6937
/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.  The two steps must run in this
 * order: i40e_prep_for_reset() quiesces the device before
 * i40e_reset_and_rebuild() performs the actual reset (with reinit=false,
 * i.e. without re-initializing the Main VSI).
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf)
{
	i40e_prep_for_reset(pf);
	i40e_reset_and_rebuild(pf, false);
}
6950
/**
 * i40e_handle_mdd_event - handle a Malicious Driver Detection interrupt
 * @pf: pointer to the PF structure
 *
 * Called from the service task after the MDD irq handler sets
 * __I40E_MDD_EVENT_PENDING.  Decodes which function/queue triggered the
 * event, schedules a PF reset if the PF itself is the offender, counts
 * events per VF (disabling a VF that exceeds the allowed number), and
 * finally re-enables the MDD interrupt cause.
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	/* nothing to do unless the irq handler flagged an event */
	if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		/* decode the offending PF/VF/queue from the latched register */
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		/* queue field is absolute; convert to this PF's range */
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
				 event, queue, pf_num, vf_num);
		/* write all ones to clear the latched event */
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		/* write all ones to clear the latched event */
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	/* was the PF itself the offender?  If so, request a PF reset */
	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
			i40e_service_event_schedule(pf);
		}
	}

	/* see if one of the VFs needs its hand slapped
	 * (loop is skipped entirely when no MDD event was detected)
	 */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		/* repeat offenders get disabled until an admin re-enables */
		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}
7058
/**
 * i40e_sync_udp_filters_subtask - Sync pending UDP tunnel ports with HW
 * @pf: board private structure
 *
 * Walks the pending_udp_bitmap and, for each marked slot, either adds
 * the stored UDP tunnel port (vxlan/geneve) to the hardware or deletes
 * it (a slot whose port is 0 means "remove").  Runs only when the
 * I40E_FLAG_UDP_FILTER_SYNC flag was set by whoever queued a change.
 **/
static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	__be16 port;
	int i;

	if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
		return;

	/* consume the flag before processing so new requests re-arm it */
	pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->pending_udp_bitmap & BIT_ULL(i)) {
			pf->pending_udp_bitmap &= ~BIT_ULL(i);
			port = pf->udp_ports[i].index;
			/* non-zero port: add the tunnel; zero: delete slot i */
			if (port)
				ret = i40e_aq_add_udp_tunnel(hw, port,
							pf->udp_ports[i].type,
							NULL, NULL);
			else
				ret = i40e_aq_del_udp_tunnel(hw, i, NULL);

			if (ret) {
				dev_dbg(&pf->pdev->dev,
					"%s %s port %d, index %d failed, err %s aq_err %s\n",
					pf->udp_ports[i].type ? "vxlan" : "geneve",
					port ? "add" : "delete",
					ntohs(port), i,
					i40e_stat_str(&pf->hw, ret),
					i40e_aq_str(&pf->hw,
						    pf->hw.aq.asq_last_status));
				/* drop the port from our table on failure */
				pf->udp_ports[i].index = 0;
			}
		}
	}
}
7100
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007101/**
7102 * i40e_service_task - Run the driver's async subtasks
7103 * @work: pointer to work_struct containing our data
7104 **/
7105static void i40e_service_task(struct work_struct *work)
7106{
7107 struct i40e_pf *pf = container_of(work,
7108 struct i40e_pf,
7109 service_task);
7110 unsigned long start_time = jiffies;
7111
Shannon Nelsone57a2fe2014-06-03 23:50:19 +00007112 /* don't bother with service tasks if a reset is in progress */
7113 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7114 i40e_service_event_complete(pf);
7115 return;
7116 }
7117
Kiran Patilb03a8c12015-09-24 18:13:15 -04007118 i40e_detect_recover_hung(pf);
Jesse Brandeburg2818ccd2016-01-13 16:51:38 -08007119 i40e_sync_filters_subtask(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007120 i40e_reset_subtask(pf);
7121 i40e_handle_mdd_event(pf);
7122 i40e_vc_process_vflr_event(pf);
7123 i40e_watchdog_subtask(pf);
7124 i40e_fdir_reinit_subtask(pf);
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06007125 i40e_client_subtask(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007126 i40e_sync_filters_subtask(pf);
Singhai, Anjali6a899022015-12-14 12:21:18 -08007127 i40e_sync_udp_filters_subtask(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007128 i40e_clean_adminq_subtask(pf);
7129
7130 i40e_service_event_complete(pf);
7131
7132 /* If the tasks have taken longer than one timer cycle or there
7133 * is more work to be done, reschedule the service task now
7134 * rather than wait for the timer to tick again.
7135 */
7136 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
7137 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
7138 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
7139 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
7140 i40e_service_event_schedule(pf);
7141}
7142
/**
 * i40e_service_timer - timer callback
 * @data: pointer to PF struct (cast from unsigned long, old-style timer API)
 *
 * Re-arms itself roughly one service_timer_period in the future
 * (round_jiffies() batches timer wakeups) and kicks the service task.
 **/
static void i40e_service_timer(unsigned long data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}
7155
/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 *
 * Fills in alloc_queue_pairs, num_desc and (where applicable)
 * num_q_vectors based on the VSI type and the limits recorded in the
 * PF.  Returns 0 on success or -ENODATA for an unknown VSI type.
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		/* without MSI-X everything shares a single vector */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		/* flow director sideband uses exactly one queue pair */
		vsi->alloc_queue_pairs = 1;
		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fdsb_msix;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		/* VF vectors are managed by the VF driver; none set here */
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
		vsi->alloc_queue_pairs = pf->num_fcoe_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fcoe_msix;
		break;

#endif /* I40E_FCOE */
	default:
		WARN_ON(1);
		return -ENODATA;
	}

	return 0;
}
7212
7213/**
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00007214 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
7215 * @type: VSI pointer
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00007216 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00007217 *
7218 * On error: returns error code (negative)
7219 * On success: returns 0
7220 **/
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00007221static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00007222{
7223 int size;
7224 int ret = 0;
7225
Shannon Nelsonac6c5e32013-11-20 10:02:57 +00007226 /* allocate memory for both Tx and Rx ring pointers */
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00007227 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
7228 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
7229 if (!vsi->tx_rings)
7230 return -ENOMEM;
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00007231 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
7232
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00007233 if (alloc_qvectors) {
7234 /* allocate memory for q_vector pointers */
Julia Lawallf57e4fb2014-07-30 03:11:09 +00007235 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00007236 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
7237 if (!vsi->q_vectors) {
7238 ret = -ENOMEM;
7239 goto err_vectors;
7240 }
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00007241 }
7242 return ret;
7243
7244err_vectors:
7245 kfree(vsi->tx_rings);
7246 return ret;
7247}
7248
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * Finds a free slot in pf->vsi[], allocates and initializes a new VSI
 * there, and sizes its ring/vector arrays.  Serialized against other
 * VSI create/destroy paths by pf->switch_mutex.
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		/* wrapped; rescan from the start up to where we began */
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	/* remember where to start the next search */
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	/* new VSI starts down until explicitly brought up */
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->int_rate_limit = 0;
	/* only the main VSI gets the full RSS table; others get 64 entries */
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_list_lock);
	pf->vsi[vsi_idx] = vsi;
	/* success: the VSI index is the return value */
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	/* rewind the search hint and release the half-built VSI */
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
7332
/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: the VSI whose arrays are being freed
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	/* rx_rings aliases the second half of the tx_rings allocation
	 * (see i40e_vsi_alloc_arrays), so it must not be freed separately
	 */
	vsi->rx_rings = NULL;
}
7352
/**
 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
 * and lookup table
 * @vsi: Pointer to VSI structure (may be NULL; treated as a no-op)
 */
static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
{
	if (!vsi)
		return;

	/* kfree(NULL) is a no-op, so unset buffers need no extra check */
	kfree(vsi->rss_hkey_user);
	vsi->rss_hkey_user = NULL;

	kfree(vsi->rss_lut_user);
	vsi->rss_lut_user = NULL;
}
7369
/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 *
 * Releases the VSI's queue/vector lumps and arrays, removes it from
 * pf->vsi[], and frees the structure itself.  Sanity-checks that the
 * PF's slot actually points at this VSI before touching shared state;
 * on a mismatch the VSI memory is deliberately NOT freed (see the
 * "no free!" message) to avoid corrupting whatever the slot holds.
 * Always returns 0.
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	/* a VSI with no back-pointer never joined a PF; just free it */
	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the PF for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);
	i40e_clear_rss_config_user(vsi);

	pf->vsi[vsi->idx] = NULL;
	/* let the next allocation reuse this slot if it is earlier */
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}
7420
7421/**
Alexander Duyck9f65e152013-09-28 06:00:58 +00007422 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7423 * @vsi: the VSI being cleaned
7424 **/
Shannon Nelsonbe1d5ee2013-11-28 06:39:23 +00007425static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
Alexander Duyck9f65e152013-09-28 06:00:58 +00007426{
7427 int i;
7428
Greg Rose8e9dca52013-12-18 13:45:53 +00007429 if (vsi->tx_rings && vsi->tx_rings[0]) {
Neerav Parikhd7397642013-11-28 06:39:37 +00007430 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
Mitch Williams00403f02013-09-28 07:13:13 +00007431 kfree_rcu(vsi->tx_rings[i], rcu);
7432 vsi->tx_rings[i] = NULL;
7433 vsi->rx_rings[i] = NULL;
7434 }
Shannon Nelsonbe1d5ee2013-11-28 06:39:23 +00007435 }
Alexander Duyck9f65e152013-09-28 06:00:58 +00007436}
7437
/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 *
 * For each queue pair, one allocation holds both the Tx ring (index 0)
 * and the Rx ring (index 1) -- i40e_vsi_clear_rings() relies on this
 * pairing when freeing.  Returns 0 on success or -ENOMEM after undoing
 * any pairs allocated so far.
 **/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* Set basic values in the rings to be used later during open() */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
		/* allocate space for both Tx and Rx in one shot */
		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
		if (!tx_ring)
			goto err_out;

		tx_ring->queue_index = i;
		tx_ring->reg_idx = vsi->base_queue + i;
		tx_ring->ring_active = false;
		tx_ring->vsi = vsi;
		tx_ring->netdev = vsi->netdev;
		tx_ring->dev = &pf->pdev->dev;
		tx_ring->count = vsi->num_desc;
		tx_ring->size = 0;
		tx_ring->dcb_tc = 0;
		/* opt into write-back-on-ITR when the device supports it */
		if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		tx_ring->tx_itr_setting = pf->tx_itr_default;
		vsi->tx_rings[i] = tx_ring;

		/* the Rx ring is the second element of the pair */
		rx_ring = &tx_ring[1];
		rx_ring->queue_index = i;
		rx_ring->reg_idx = vsi->base_queue + i;
		rx_ring->ring_active = false;
		rx_ring->vsi = vsi;
		rx_ring->netdev = vsi->netdev;
		rx_ring->dev = &pf->pdev->dev;
		rx_ring->count = vsi->num_desc;
		rx_ring->size = 0;
		rx_ring->dcb_tc = 0;
		rx_ring->rx_itr_setting = pf->rx_itr_default;
		vsi->rx_rings[i] = rx_ring;
	}

	return 0;

err_out:
	/* free whatever pairs were already allocated */
	i40e_vsi_clear_rings(vsi);
	return -ENOMEM;
}
7489
7490/**
7491 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
7492 * @pf: board private structure
7493 * @vectors: the number of MSI-X vectors to request
7494 *
7495 * Returns the number of vectors reserved, or error
7496 **/
7497static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7498{
Alexander Gordeev7b37f372014-02-18 11:11:42 +01007499 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7500 I40E_MIN_MSIX, vectors);
7501 if (vectors < 0) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007502 dev_info(&pf->pdev->dev,
Alexander Gordeev7b37f372014-02-18 11:11:42 +01007503 "MSI-X vector reservation failed: %d\n", vectors);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007504 vectors = 0;
7505 }
7506
7507 return vectors;
7508}
7509
/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns the number of vectors reserved or negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int vectors_left;
	int v_budget, i;
	int v_actual;
	int iwarp_requested = 0;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *   - Add 1 for "other" cause for Admin Queue events, etc.
	 *   - The number of LAN queue pairs
	 *	- Queues being used for RSS.
	 *		We don't need as many as max_rss_size vectors.
	 *		use rss_size instead in the calculation since that
	 *		is governed by number of cpus in the system.
	 *	- assumes symmetric Tx/Rx pairing
	 *   - The number of VMDq pairs
	 *   - The CPU count within the NUMA node if iWARP is enabled
#ifdef I40E_FCOE
	 *   - The number of FCOE qps.
#endif
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
	 */
	/* budget is built feature-by-feature below; vectors_left tracks how
	 * much of the hardware's MSI-X capability remains unclaimed
	 */
	vectors_left = hw->func_caps.num_msix_vectors;
	v_budget = 0;

	/* reserve one vector for miscellaneous handler */
	if (vectors_left) {
		v_budget++;
		vectors_left--;
	}

	/* reserve vectors for the main PF traffic queues */
	pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
	vectors_left -= pf->num_lan_msix;
	v_budget += pf->num_lan_msix;

	/* reserve one vector for sideband flow director */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (vectors_left) {
			pf->num_fdsb_msix = 1;
			v_budget++;
			vectors_left--;
		} else {
			pf->num_fdsb_msix = 0;
		}
	}

#ifdef I40E_FCOE
	/* can we reserve enough for FCoE? */
	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
		if (!vectors_left)
			pf->num_fcoe_msix = 0;
		else if (vectors_left >= pf->num_fcoe_qps)
			pf->num_fcoe_msix = pf->num_fcoe_qps;
		else
			pf->num_fcoe_msix = 1;
		v_budget += pf->num_fcoe_msix;
		vectors_left -= pf->num_fcoe_msix;
	}

#endif
	/* can we reserve enough for iWARP? */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		/* remember the original request so the redistribution path
		 * below can cap its scaled-down grant at what was asked for
		 */
		iwarp_requested = pf->num_iwarp_msix;

		/* NOTE(review): when vectors_left is nonzero but smaller than
		 * the request, iWARP is scaled down to a single vector rather
		 * than to vectors_left — presumably intentional to leave room
		 * for VMDq below; confirm against driver history
		 */
		if (!vectors_left)
			pf->num_iwarp_msix = 0;
		else if (vectors_left < pf->num_iwarp_msix)
			pf->num_iwarp_msix = 1;
		v_budget += pf->num_iwarp_msix;
		vectors_left -= pf->num_iwarp_msix;
	}

	/* any vectors left over go for VMDq support */
	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
		int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
		int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);

		if (!vectors_left) {
			pf->num_vmdq_msix = 0;
			pf->num_vmdq_qps = 0;
		} else {
			/* if we're short on vectors for what's desired, we limit
			 * the queues per vmdq.  If this is still more than are
			 * available, the user will need to change the number of
			 * queues/vectors used by the PF later with the ethtool
			 * channels command
			 */
			if (vmdq_vecs < vmdq_vecs_wanted)
				pf->num_vmdq_qps = 1;
			pf->num_vmdq_msix = pf->num_vmdq_qps;

			v_budget += vmdq_vecs;
			vectors_left -= vmdq_vecs;
		}
	}

	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	v_actual = i40e_reserve_msix_vectors(pf, v_budget);

	/* the OS may grant fewer vectors than budgeted; handle the three
	 * outcomes: unusable, bare minimum, or partial grant
	 */
	if (v_actual < I40E_MIN_MSIX) {
		/* can't even run with the minimum — abandon MSI-X entirely */
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		pci_disable_msix(pf->pdev);
		return -ENODEV;

	} else if (v_actual == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (!vectors_left) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
		int vec;

		dev_info(&pf->pdev->dev,
			 "MSI-X vector limit reached, attempting to redistribute vectors\n");
		/* reserve the misc vector */
		vec = v_actual - 1;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;
		pf->num_vmdq_qps = 1;
#ifdef I40E_FCOE
		pf->num_fcoe_qps = 0;
		pf->num_fcoe_msix = 0;
#endif

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_iwarp_msix = 1;
			} else {
				pf->num_lan_msix = 2;
			}
#ifdef I40E_FCOE
			/* give one vector to FCoE */
			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_fcoe_msix = 1;
			}
#endif
			break;
		default:
			/* split roughly evenly: a third each to iWARP and
			 * VMDq (or half to VMDq when no iWARP), remainder
			 * to the LAN queues
			 */
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_iwarp_msix = min_t(int, (vec / 3),
						 iwarp_requested);
				pf->num_vmdq_vsis = min_t(int, (vec / 3),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			} else {
				pf->num_vmdq_vsis = min_t(int, (vec / 2),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			}
			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
				pf->num_fdsb_msix = 1;
				vec--;
			}
			pf->num_lan_msix = min_t(int,
			       (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
							      pf->num_lan_msix);
			pf->num_lan_qps = pf->num_lan_msix;
#ifdef I40E_FCOE
			/* give one vector to FCoE */
			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
				pf->num_fcoe_msix = 1;
				vec--;
			}
#endif
			break;
		}
	}

	/* disable any feature that ended up with zero vectors so the rest
	 * of the driver doesn't try to use it
	 */
	if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
	    (pf->num_fdsb_msix == 0)) {
		dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
	}
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (pf->num_iwarp_msix == 0)) {
		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
	}
#ifdef I40E_FCOE

	if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
		dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
	}
#endif
	i40e_debug(&pf->hw, I40E_DEBUG_INIT,
		   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
		   pf->num_lan_msix,
		   pf->num_vmdq_msix * pf->num_vmdq_vsis,
		   pf->num_fdsb_msix,
		   pf->num_iwarp_msix);

	return v_actual;
}
7748
7749/**
Greg Rose90e04072014-03-06 08:59:57 +00007750 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
Alexander Duyck493fb302013-09-28 07:01:44 +00007751 * @vsi: the VSI being configured
7752 * @v_idx: index of the vector in the vsi struct
Guilherme G. Piccoli7f6c5532016-06-27 12:16:43 -03007753 * @cpu: cpu to be used on affinity_mask
Alexander Duyck493fb302013-09-28 07:01:44 +00007754 *
7755 * We allocate one q_vector. If allocation fails we return -ENOMEM.
7756 **/
Guilherme G. Piccoli7f6c5532016-06-27 12:16:43 -03007757static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
Alexander Duyck493fb302013-09-28 07:01:44 +00007758{
7759 struct i40e_q_vector *q_vector;
7760
7761 /* allocate q_vector */
7762 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7763 if (!q_vector)
7764 return -ENOMEM;
7765
7766 q_vector->vsi = vsi;
7767 q_vector->v_idx = v_idx;
Guilherme G. Piccoli7f6c5532016-06-27 12:16:43 -03007768 cpumask_set_cpu(cpu, &q_vector->affinity_mask);
7769
Alexander Duyck493fb302013-09-28 07:01:44 +00007770 if (vsi->netdev)
7771 netif_napi_add(vsi->netdev, &q_vector->napi,
Jesse Brandeburgeefeace2014-05-10 04:49:13 +00007772 i40e_napi_poll, NAPI_POLL_WEIGHT);
Alexander Duyck493fb302013-09-28 07:01:44 +00007773
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00007774 q_vector->rx.latency_range = I40E_LOW_LATENCY;
7775 q_vector->tx.latency_range = I40E_LOW_LATENCY;
7776
Alexander Duyck493fb302013-09-28 07:01:44 +00007777 /* tie q_vector and vsi together */
7778 vsi->q_vectors[v_idx] = q_vector;
7779
7780 return 0;
7781}
7782
7783/**
Greg Rose90e04072014-03-06 08:59:57 +00007784 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007785 * @vsi: the VSI being configured
7786 *
7787 * We allocate one q_vector per queue interrupt. If allocation fails we
7788 * return -ENOMEM.
7789 **/
Greg Rose90e04072014-03-06 08:59:57 +00007790static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007791{
7792 struct i40e_pf *pf = vsi->back;
Guilherme G. Piccoli7f6c5532016-06-27 12:16:43 -03007793 int err, v_idx, num_q_vectors, current_cpu;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007794
7795 /* if not MSIX, give the one vector only to the LAN VSI */
7796 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7797 num_q_vectors = vsi->num_q_vectors;
7798 else if (vsi == pf->vsi[pf->lan_vsi])
7799 num_q_vectors = 1;
7800 else
7801 return -EINVAL;
7802
Guilherme G. Piccoli7f6c5532016-06-27 12:16:43 -03007803 current_cpu = cpumask_first(cpu_online_mask);
7804
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007805 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
Guilherme G. Piccoli7f6c5532016-06-27 12:16:43 -03007806 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
Alexander Duyck493fb302013-09-28 07:01:44 +00007807 if (err)
7808 goto err_out;
Guilherme G. Piccoli7f6c5532016-06-27 12:16:43 -03007809 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
7810 if (unlikely(current_cpu >= nr_cpu_ids))
7811 current_cpu = cpumask_first(cpu_online_mask);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007812 }
7813
7814 return 0;
Alexander Duyck493fb302013-09-28 07:01:44 +00007815
7816err_out:
7817 while (v_idx--)
7818 i40e_free_q_vector(vsi, v_idx);
7819
7820 return err;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007821}
7822
/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 *
 * Tries MSI-X first, then MSI, then falls back to legacy interrupts,
 * and sets up the irq_pile tracker used to hand out vectors.
 *
 * Returns 0 on success, -ENOMEM if the tracker cannot be allocated.
 **/
static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int vectors = 0;
	ssize_t size;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		vectors = i40e_init_msix(pf);
		if (vectors < 0) {
			/* MSI-X is unavailable: every feature that depends on
			 * multiple vectors must be turned off before retrying
			 * with MSI/legacy below
			 */
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
				       I40E_FLAG_IWARP_ENABLED	|
#ifdef I40E_FCOE
				       I40E_FLAG_FCOE_ENABLED	|
#endif
				       I40E_FLAG_RSS_ENABLED	|
				       I40E_FLAG_DCB_CAPABLE	|
				       I40E_FLAG_DCB_ENABLED	|
				       I40E_FLAG_SRIOV_ENABLED	|
				       I40E_FLAG_FD_SB_ENABLED	|
				       I40E_FLAG_FD_ATR_ENABLED	|
				       I40E_FLAG_VMDQ_ENABLED);

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		vectors = pci_enable_msi(pf->pdev);
		if (vectors < 0) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
				 vectors);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
		/* whether MSI succeeded or we drop to legacy, exactly one
		 * vector is available from here on
		 */
		vectors = 1; /* one MSI or Legacy vector */
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile) {
		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
		return -ENOMEM;
	}
	pf->irq_pile->num_entries = vectors;
	pf->irq_pile->search_hint = 0;

	/* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}
7883
/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors.  This is not used
 * when in MSI or Legacy interrupt mode.
 *
 * Returns 0 on success, -EFAULT if the IRQ could not be requested.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the irq if this is the first time through, and
	 * not when we're rebuilding after a Reset
	 */
	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			/* NOTE(review): the request_irq error code is
			 * collapsed to -EFAULT here — confirm callers do not
			 * need the original errno
			 */
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	/* push the register writes out before enabling the interrupt */
	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf, true);

	return err;
}
7923
7924/**
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04007925 * i40e_config_rss_aq - Prepare for RSS using AQ commands
7926 * @vsi: vsi structure
7927 * @seed: RSS hash seed
7928 **/
Helin Zhange69ff812015-10-21 19:56:22 -04007929static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
7930 u8 *lut, u16 lut_size)
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04007931{
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04007932 struct i40e_pf *pf = vsi->back;
7933 struct i40e_hw *hw = &pf->hw;
Jacob Keller776b2e12016-07-19 16:23:30 -07007934 int ret = 0;
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04007935
Jacob Keller776b2e12016-07-19 16:23:30 -07007936 if (seed) {
7937 struct i40e_aqc_get_set_rss_key_data *seed_dw =
7938 (struct i40e_aqc_get_set_rss_key_data *)seed;
7939 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
7940 if (ret) {
7941 dev_info(&pf->pdev->dev,
7942 "Cannot set RSS key, err %s aq_err %s\n",
7943 i40e_stat_str(hw, ret),
7944 i40e_aq_str(hw, hw->aq.asq_last_status));
7945 return ret;
7946 }
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04007947 }
Jacob Keller776b2e12016-07-19 16:23:30 -07007948 if (lut) {
7949 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04007950
Jacob Keller776b2e12016-07-19 16:23:30 -07007951 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
7952 if (ret) {
7953 dev_info(&pf->pdev->dev,
7954 "Cannot set RSS lut, err %s aq_err %s\n",
7955 i40e_stat_str(hw, ret),
7956 i40e_aq_str(hw, hw->aq.asq_last_status));
7957 return ret;
7958 }
7959 }
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04007960 return ret;
7961}
7962
7963/**
Anjali Singhai Jain95a73782015-12-22 14:25:04 -08007964 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
7965 * @vsi: Pointer to vsi structure
7966 * @seed: Buffter to store the hash keys
7967 * @lut: Buffer to store the lookup table entries
7968 * @lut_size: Size of buffer to store the lookup table entries
7969 *
7970 * Return 0 on success, negative on failure
7971 */
7972static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
7973 u8 *lut, u16 lut_size)
7974{
7975 struct i40e_pf *pf = vsi->back;
7976 struct i40e_hw *hw = &pf->hw;
7977 int ret = 0;
7978
7979 if (seed) {
7980 ret = i40e_aq_get_rss_key(hw, vsi->id,
7981 (struct i40e_aqc_get_set_rss_key_data *)seed);
7982 if (ret) {
7983 dev_info(&pf->pdev->dev,
7984 "Cannot get RSS key, err %s aq_err %s\n",
7985 i40e_stat_str(&pf->hw, ret),
7986 i40e_aq_str(&pf->hw,
7987 pf->hw.aq.asq_last_status));
7988 return ret;
7989 }
7990 }
7991
7992 if (lut) {
7993 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
7994
7995 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
7996 if (ret) {
7997 dev_info(&pf->pdev->dev,
7998 "Cannot get RSS lut, err %s aq_err %s\n",
7999 i40e_stat_str(&pf->hw, ret),
8000 i40e_aq_str(&pf->hw,
8001 pf->hw.aq.asq_last_status));
8002 return ret;
8003 }
8004 }
8005
8006 return ret;
8007}
8008
8009/**
Jacob Keller0582b962016-07-19 16:23:29 -07008010 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
8011 * @vsi: VSI structure
8012 **/
8013static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
8014{
8015 u8 seed[I40E_HKEY_ARRAY_SIZE];
8016 struct i40e_pf *pf = vsi->back;
8017 u8 *lut;
8018 int ret;
8019
8020 if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
8021 return 0;
8022
Jacob Keller552b9962016-07-19 16:23:31 -07008023 if (!vsi->rss_size)
8024 vsi->rss_size = min_t(int, pf->alloc_rss_size,
8025 vsi->num_queue_pairs);
8026 if (!vsi->rss_size)
8027 return -EINVAL;
8028
Jacob Keller0582b962016-07-19 16:23:29 -07008029 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8030 if (!lut)
8031 return -ENOMEM;
Jacob Keller552b9962016-07-19 16:23:31 -07008032 /* Use the user configured hash keys and lookup table if there is one,
8033 * otherwise use default
8034 */
8035 if (vsi->rss_lut_user)
8036 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8037 else
8038 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8039 if (vsi->rss_hkey_user)
8040 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8041 else
8042 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
Jacob Keller0582b962016-07-19 16:23:29 -07008043 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
8044 kfree(lut);
8045
8046 return ret;
8047}
8048
/**
 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
 * @vsi: Pointer to vsi structure
 * @seed: RSS hash seed, may be NULL to skip key programming
 * @lut: Lookup table, may be NULL to skip LUT programming
 * @lut_size: Lookup table size in bytes
 *
 * Writes the key/LUT into the PF register bank for the main VSI or the
 * per-VF bank for an SR-IOV VSI; other VSI types only log an error.
 *
 * Returns 0 on success, -EINVAL if @lut_size does not match the
 * register array for the VSI type.
 **/
static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
			       const u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	/* Fill out hash function seed */
	if (seed) {
		/* registers are 32 bits wide, so the byte buffer is walked
		 * one dword at a time (buffer sizes are dword multiples)
		 */
		u32 *seed_dw = (u32 *)seed;

		if (vsi->type == I40E_VSI_MAIN) {
			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
				i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
						  seed_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
				i40e_write_rx_ctl(hw,
						  I40E_VFQF_HKEY1(i, vf_id),
						  seed_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
		}
	}

	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (vsi->type == I40E_VSI_MAIN) {
			if (lut_size != I40E_HLUT_ARRAY_SIZE)
				return -EINVAL;
			/* NOTE(review): PF LUT uses plain wr32 while the key
			 * and the VF LUT go through i40e_write_rx_ctl —
			 * presumably intentional; confirm against HW errata
			 */
			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
				i40e_write_rx_ctl(hw,
						  I40E_VFQF_HLUT1(i, vf_id),
						  lut_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
		}
	}
	/* push all the register writes out to hardware */
	i40e_flush(hw);

	return 0;
}
8107
8108/**
Helin Zhang043dd652015-10-21 19:56:23 -04008109 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
8110 * @vsi: Pointer to VSI structure
8111 * @seed: Buffer to store the keys
8112 * @lut: Buffer to store the lookup table entries
8113 * @lut_size: Size of buffer to store the lookup table entries
8114 *
8115 * Returns 0 on success, negative on failure
8116 */
8117static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
8118 u8 *lut, u16 lut_size)
8119{
8120 struct i40e_pf *pf = vsi->back;
8121 struct i40e_hw *hw = &pf->hw;
8122 u16 i;
8123
8124 if (seed) {
8125 u32 *seed_dw = (u32 *)seed;
8126
8127 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
Shannon Nelson272cdaf22016-02-17 16:12:21 -08008128 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
Helin Zhang043dd652015-10-21 19:56:23 -04008129 }
8130 if (lut) {
8131 u32 *lut_dw = (u32 *)lut;
8132
8133 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8134 return -EINVAL;
8135 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8136 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
8137 }
8138
8139 return 0;
8140}
8141
8142/**
8143 * i40e_config_rss - Configure RSS keys and lut
8144 * @vsi: Pointer to VSI structure
8145 * @seed: RSS hash seed
8146 * @lut: Lookup table
8147 * @lut_size: Lookup table size
8148 *
8149 * Returns 0 on success, negative on failure
8150 */
8151int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8152{
8153 struct i40e_pf *pf = vsi->back;
8154
8155 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8156 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
8157 else
8158 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
8159}
8160
8161/**
8162 * i40e_get_rss - Get RSS keys and lut
8163 * @vsi: Pointer to VSI structure
8164 * @seed: Buffer to store the keys
8165 * @lut: Buffer to store the lookup table entries
8166 * lut_size: Size of buffer to store the lookup table entries
8167 *
8168 * Returns 0 on success, negative on failure
8169 */
8170int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8171{
Anjali Singhai Jain95a73782015-12-22 14:25:04 -08008172 struct i40e_pf *pf = vsi->back;
8173
8174 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8175 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
8176 else
8177 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
Helin Zhang043dd652015-10-21 19:56:23 -04008178}
8179
8180/**
Helin Zhange69ff812015-10-21 19:56:22 -04008181 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
8182 * @pf: Pointer to board private structure
8183 * @lut: Lookup table
8184 * @rss_table_size: Lookup table size
8185 * @rss_size: Range of queue number for hashing
8186 */
Alan Bradyf1582352016-08-24 11:33:46 -07008187void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
8188 u16 rss_table_size, u16 rss_size)
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04008189{
Helin Zhange69ff812015-10-21 19:56:22 -04008190 u16 i;
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04008191
Helin Zhange69ff812015-10-21 19:56:22 -04008192 for (i = 0; i < rss_table_size; i++)
8193 lut[i] = i % rss_size;
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04008194}
8195
/**
 * i40e_pf_config_rss - Prepare for RSS if used
 * @pf: board private structure
 *
 * Enables the default hash-enable (HENA) bits, sizes the hardware LUT,
 * then builds and programs the key and LUT for the main LAN VSI.
 *
 * Returns 0 on success, -EINVAL if no RSS size can be determined,
 * -ENOMEM or a programming error otherwise.
 **/
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_val;
	u64 hena;
	int ret;

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	/* HENA spans two 32-bit registers; OR in the defaults on top of
	 * whatever is already enabled
	 */
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= i40e_pf_get_default_rss_hena(pf);

	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Determine the RSS table size based on the hardware capabilities */
	/* the HASHLUTSIZE bit selects a 512-entry LUT instead of 128 */
	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	reg_val = (pf->rss_table_size == 512) ?
			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);

	/* Determine the RSS size of the VSI */
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use user configured lut if there is one, otherwise use default */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}
8254
/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * returns 0 if rss is not enabled, if enabled returns the final rss queue
 * count which may be different from the requested queue count.
 **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	int new_rss_size;

	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	/* Clamp the request to the hardware's RSS maximum */
	new_rss_size = min_t(int, queue_count, pf->rss_size_max);

	if (queue_count != vsi->num_queue_pairs) {
		vsi->req_queue_pairs = queue_count;
		/* Quiesce the device before changing the queue layout; the
		 * new alloc_rss_size takes effect during the rebuild.
		 */
		i40e_prep_for_reset(pf);

		pf->alloc_rss_size = new_rss_size;

		i40e_reset_and_rebuild(pf, true);

		/* Discard the user configured hash keys and lut, if less
		 * queues are enabled.
		 */
		if (queue_count < vsi->rss_size) {
			i40e_clear_rss_config_user(vsi);
			dev_dbg(&pf->pdev->dev,
				"discard user configured hash keys and lut\n");
		}

		/* Reset vsi->rss_size, as number of enabled queues changed */
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);

		i40e_pf_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
		 vsi->req_queue_pairs, pf->rss_size_max);
	return pf->alloc_rss_size;
}
8300
8301/**
Greg Rosef4492db2015-02-06 08:52:12 +00008302 * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
8303 * @pf: board private structure
8304 **/
8305i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
8306{
8307 i40e_status status;
8308 bool min_valid, max_valid;
8309 u32 max_bw, min_bw;
8310
8311 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
8312 &min_valid, &max_valid);
8313
8314 if (!status) {
8315 if (min_valid)
8316 pf->npar_min_bw = min_bw;
8317 if (max_valid)
8318 pf->npar_max_bw = max_bw;
8319 }
8320
8321 return status;
8322}
8323
8324/**
8325 * i40e_set_npar_bw_setting - Set BW settings for this PF partition
8326 * @pf: board private structure
8327 **/
8328i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
8329{
8330 struct i40e_aqc_configure_partition_bw_data bw_data;
8331 i40e_status status;
8332
Jeff Kirsherb40c82e62015-02-27 09:18:34 +00008333 /* Set the valid bit for this PF */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04008334 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
Greg Rosef4492db2015-02-06 08:52:12 +00008335 bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
8336 bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
8337
8338 /* Set the new bandwidths */
8339 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
8340
8341 return status;
8342}
8343
/**
 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
 * @pf: board private structure
 *
 * Persists the temporary (alt RAM) bandwidth settings into the NVM by
 * reading SW compatibility word 1 and writing it back unchanged, which
 * triggers a shadow-RAM-to-NVM update. Only supported on partition 1.
 *
 * Returns 0 on success or an i40e_status error code.
 **/
i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
{
	/* Commit temporary BW setting to permanent NVM image */
	enum i40e_admin_queue_err last_aq_status;
	i40e_status ret;
	u16 nvm_word;

	if (pf->hw.partition_id != 1) {
		dev_info(&pf->pdev->dev,
			 "Commit BW only works on partition 1! This is partition %d",
			 pf->hw.partition_id);
		ret = I40E_NOT_SUPPORTED;
		goto bw_commit_out;
	}

	/* Acquire NVM for read access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Read word 0x10 of NVM - SW compatibility word 1 */
	ret = i40e_aq_read_nvm(&pf->hw,
			       I40E_SR_NVM_CONTROL_WORD,
			       0x10, sizeof(nvm_word), &nvm_word,
			       false, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Wait a bit for NVM release to complete */
	msleep(50);

	/* Acquire NVM for write access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}
	/* Write it back out unchanged to initiate update NVM,
	 * which will force a write of the shadow (alt) RAM to
	 * the NVM - thus storing the bandwidth values permanently.
	 */
	ret = i40e_aq_update_nvm(&pf->hw,
				 I40E_SR_NVM_CONTROL_WORD,
				 0x10, sizeof(nvm_word),
				 &nvm_word, true, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "BW settings NOT SAVED, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:

	return ret;
}
8426
/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 *
 * Returns 0 on success, -ENOMEM if the queue tracking pile cannot
 * be allocated.
 **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED |
		    I40E_FLAG_MSIX_ENABLED;

	/* Set default ITR */
	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
	pf->alloc_rss_size = 1;
	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);
	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
					   num_online_cpus());
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
		if (i40e_get_npar_bw_setting(pf))
			dev_warn(&pf->pdev->dev,
				 "Could not get NPAR bw settings\n");
		else
			dev_info(&pf->pdev->dev,
				 "Min BW = %8.8x, Max BW = %8.8x\n",
				 pf->npar_min_bw, pf->npar_max_bw);
	}

	/* FW/NVM is not yet fixed in this regard */
	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		/* Flow Director sideband filters are not usable when the
		 * device is partitioned into multiple functions (MFP).
		 */
		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
		    pf->hw.num_partitions > 1)
			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
		else
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	/* XL710-family workarounds gated on firmware version */
	if (i40e_is_mac_710(&pf->hw) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	    (pf->hw.aq.fw_maj_ver < 4))) {
		pf->flags |= I40E_FLAG_RESTART_AUTONEG;
		/* No DCB support  for FW < v4.33 */
		pf->flags |= I40E_FLAG_NO_DCB_SUPPORT;
	}

	/* Disable FW LLDP if FW < v4.3 */
	if (i40e_is_mac_710(&pf->hw) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)))
		pf->flags |= I40E_FLAG_STOP_FW_LLDP;

	/* Use the FW Set LLDP MIB API if FW > v4.40 */
	if (i40e_is_mac_710(&pf->hw) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
	    (pf->hw.aq.fw_maj_ver >= 5)))
		pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;

	if (pf->hw.func_caps.vmdq) {
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
	}

	if (pf->hw.func_caps.iwarp) {
		pf->flags |= I40E_FLAG_IWARP_ENABLED;
		/* IWARP needs one extra vector for CQP just like MISC.*/
		pf->num_iwarp_msix = (int)num_online_cpus() + 1;
	}

#ifdef I40E_FCOE
	i40e_init_pf_fcoe(pf);

#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
	/* SR-IOV is only offered on partition 1 of an MFP device */
	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */
	/* Capability flags that depend on MAC type / FW API version */
	if (pf->hw.mac.type == I40E_MAC_X722) {
		pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
			     I40E_FLAG_128_QP_RSS_CAPABLE |
			     I40E_FLAG_HW_ATR_EVICT_CAPABLE |
			     I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
			     I40E_FLAG_WB_ON_ITR_CAPABLE |
			     I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
			     I40E_FLAG_NO_PCI_LINK_CHECK |
			     I40E_FLAG_USE_SET_LLDP_MIB |
			     I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
	} else if ((pf->hw.aq.api_maj_ver > 1) ||
		   ((pf->hw.aq.api_maj_ver == 1) &&
		    (pf->hw.aq.api_min_ver > 4))) {
		/* Supported in FW API version higher than 1.4 */
		pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
		pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
	} else {
		pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
	}

	/* Sentinel until the real EEPROM version is read */
	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;

	/* By default FW has this off for performance reasons */
	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;

	/* set up queue assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
	pf->qp_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->qp_pile) {
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
	pf->qp_pile->search_hint = 0;

	pf->tx_timeout_recovery_level = 1;

	mutex_init(&pf->switch_mutex);

	/* If NPAR is enabled nudge the Tx scheduler */
	if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
		i40e_set_npar_bw_setting(pf);

sw_init_done:
	return err;
}
8587
/**
 * i40e_set_ntuple - set the ntuple feature flag and take action
 * @pf: board private structure to initialize
 * @features: the feature set that the stack is suggesting
 *
 * returns a bool to indicate if reset needs to happen
 **/
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
{
	bool need_reset = false;

	/* Check if Flow Director n-tuple support was enabled or disabled. If
	 * the state changed, we need to reset.
	 */
	if (features & NETIF_F_NTUPLE) {
		/* Enable filters and mark for reset */
		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
			need_reset = true;
		/* enable FD_SB only if there is MSI-X vector */
		if (pf->num_fdsb_msix > 0)
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
	} else {
		/* turn off filters, mark for reset and clear SW filter list */
		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
			need_reset = true;
			i40e_fdir_filter_exit(pf);
		}
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
		/* reset fd counters */
		pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
		pf->fdir_pf_active_filters = 0;
		/* if ATR was auto disabled it can be re-enabled. */
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
			/* Clearing the auto-disable bit re-arms ATR; the
			 * log line is gated on the FD debug mask.
			 */
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
		}
	}
	return need_reset;
}
8630
8631/**
Alan Bradyd8ec9862016-07-27 12:02:38 -07008632 * i40e_clear_rss_lut - clear the rx hash lookup table
8633 * @vsi: the VSI being configured
8634 **/
8635static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
8636{
8637 struct i40e_pf *pf = vsi->back;
8638 struct i40e_hw *hw = &pf->hw;
8639 u16 vf_id = vsi->vf_id;
8640 u8 i;
8641
8642 if (vsi->type == I40E_VSI_MAIN) {
8643 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8644 wr32(hw, I40E_PFQF_HLUT(i), 0);
8645 } else if (vsi->type == I40E_VSI_SRIOV) {
8646 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
8647 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
8648 } else {
8649 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
8650 }
8651}
8652
8653/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008654 * i40e_set_features - set the netdev feature flags
8655 * @netdev: ptr to the netdev being adjusted
8656 * @features: the feature set that the stack is suggesting
8657 **/
8658static int i40e_set_features(struct net_device *netdev,
8659 netdev_features_t features)
8660{
8661 struct i40e_netdev_priv *np = netdev_priv(netdev);
8662 struct i40e_vsi *vsi = np->vsi;
Anjali Singhai Jain7c3c2882014-02-14 02:14:38 +00008663 struct i40e_pf *pf = vsi->back;
8664 bool need_reset;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008665
Alan Bradyd8ec9862016-07-27 12:02:38 -07008666 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
8667 i40e_pf_config_rss(pf);
8668 else if (!(features & NETIF_F_RXHASH) &&
8669 netdev->features & NETIF_F_RXHASH)
8670 i40e_clear_rss_lut(vsi);
8671
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008672 if (features & NETIF_F_HW_VLAN_CTAG_RX)
8673 i40e_vlan_stripping_enable(vsi);
8674 else
8675 i40e_vlan_stripping_disable(vsi);
8676
Anjali Singhai Jain7c3c2882014-02-14 02:14:38 +00008677 need_reset = i40e_set_ntuple(pf, features);
8678
8679 if (need_reset)
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04008680 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
Anjali Singhai Jain7c3c2882014-02-14 02:14:38 +00008681
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008682 return 0;
8683}
8684
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008685/**
Singhai, Anjali6a899022015-12-14 12:21:18 -08008686 * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008687 * @pf: board private structure
8688 * @port: The UDP port to look up
8689 *
8690 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
8691 **/
Singhai, Anjali6a899022015-12-14 12:21:18 -08008692static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008693{
8694 u8 i;
8695
8696 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
Singhai, Anjali6a899022015-12-14 12:21:18 -08008697 if (pf->udp_ports[i].index == port)
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008698 return i;
8699 }
8700
8701 return i;
8702}
8703
/**
 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 *
 * Records a new VXLAN/GENEVE UDP port in the PF offload table and flags
 * it pending; the actual hardware programming is deferred to the filter
 * sync path via I40E_FLAG_UDP_FILTER_SYNC.
 **/
static void i40e_udp_tunnel_add(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	__be16 port = ti->port;
	u8 next_idx;
	u8 idx;

	idx = i40e_get_udp_port_idx(pf, port);

	/* Check if port already exists */
	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "port %d already offloaded\n",
			    ntohs(port));
		return;
	}

	/* Now check if there is space to add the new port */
	next_idx = i40e_get_udp_port_idx(pf, 0);

	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
			    ntohs(port));
		return;
	}

	/* Record the tunnel type; silently ignore unsupported types and
	 * GENEVE on hardware that lacks the capability.
	 */
	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
			return;
		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
		break;
	default:
		return;
	}

	/* New port: add it and mark its index in the bitmap */
	pf->udp_ports[next_idx].index = port;
	pf->pending_udp_bitmap |= BIT_ULL(next_idx);
	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
}
8755
/**
 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 *
 * Marks a previously offloaded VXLAN/GENEVE UDP port for deletion; the
 * hardware removal is deferred to the filter sync path via
 * I40E_FLAG_UDP_FILTER_SYNC.
 **/
static void i40e_udp_tunnel_del(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	__be16 port = ti->port;
	u8 idx;

	idx = i40e_get_udp_port_idx(pf, port);

	/* Check if port already exists */
	if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
		goto not_found;

	/* The recorded tunnel type must match the one being removed */
	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
			goto not_found;
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
			goto not_found;
		break;
	default:
		goto not_found;
	}

	/* if port exists, set it to 0 (mark for deletion)
	 * and make it pending
	 */
	pf->udp_ports[idx].index = 0;
	pf->pending_udp_bitmap |= BIT_ULL(idx);
	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;

	return;
not_found:
	netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
		    ntohs(port));
}
8801
Neerav Parikh1f224ad2014-02-12 01:45:31 +00008802static int i40e_get_phys_port_id(struct net_device *netdev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01008803 struct netdev_phys_item_id *ppid)
Neerav Parikh1f224ad2014-02-12 01:45:31 +00008804{
8805 struct i40e_netdev_priv *np = netdev_priv(netdev);
8806 struct i40e_pf *pf = np->vsi->back;
8807 struct i40e_hw *hw = &pf->hw;
8808
8809 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
8810 return -EOPNOTSUPP;
8811
8812 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
8813 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
8814
8815 return 0;
8816}
8817
Jesse Brandeburg2f90ade2014-11-20 16:30:02 -08008818/**
8819 * i40e_ndo_fdb_add - add an entry to the hardware database
8820 * @ndm: the input from the stack
8821 * @tb: pointer to array of nladdr (unused)
8822 * @dev: the net device pointer
8823 * @addr: the MAC address entry being added
8824 * @flags: instructions from stack about fdb operation
8825 */
Greg Rose4ba0dea2014-03-06 08:59:55 +00008826static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
8827 struct net_device *dev,
Jiri Pirkof6f64242014-11-28 14:34:15 +01008828 const unsigned char *addr, u16 vid,
Greg Rose4ba0dea2014-03-06 08:59:55 +00008829 u16 flags)
Greg Rose4ba0dea2014-03-06 08:59:55 +00008830{
8831 struct i40e_netdev_priv *np = netdev_priv(dev);
8832 struct i40e_pf *pf = np->vsi->back;
8833 int err = 0;
8834
8835 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
8836 return -EOPNOTSUPP;
8837
Or Gerlitz65891fe2014-12-14 18:19:05 +02008838 if (vid) {
8839 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
8840 return -EINVAL;
8841 }
8842
Greg Rose4ba0dea2014-03-06 08:59:55 +00008843 /* Hardware does not support aging addresses so if a
8844 * ndm_state is given only allow permanent addresses
8845 */
8846 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
8847 netdev_info(dev, "FDB only supports static addresses\n");
8848 return -EINVAL;
8849 }
8850
8851 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
8852 err = dev_uc_add_excl(dev, addr);
8853 else if (is_multicast_ether_addr(addr))
8854 err = dev_mc_add_excl(dev, addr);
8855 else
8856 err = -EINVAL;
8857
8858 /* Only return duplicate errors if NLM_F_EXCL is set */
8859 if (err == -EEXIST && !(flags & NLM_F_EXCL))
8860 err = 0;
8861
8862 return err;
8863}
8864
Neerav Parikh51616012015-02-06 08:52:14 +00008865/**
8866 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
8867 * @dev: the netdev being configured
8868 * @nlh: RTNL message
8869 *
8870 * Inserts a new hardware bridge if not already created and
8871 * enables the bridging mode requested (VEB or VEPA). If the
8872 * hardware bridge has already been inserted and the request
8873 * is to change the mode then that requires a PF reset to
8874 * allow rebuild of the components with required hardware
8875 * bridge mode enabled.
8876 **/
8877static int i40e_ndo_bridge_setlink(struct net_device *dev,
Carolyn Wyborny9df70b62015-04-27 14:57:11 -04008878 struct nlmsghdr *nlh,
8879 u16 flags)
Neerav Parikh51616012015-02-06 08:52:14 +00008880{
8881 struct i40e_netdev_priv *np = netdev_priv(dev);
8882 struct i40e_vsi *vsi = np->vsi;
8883 struct i40e_pf *pf = vsi->back;
8884 struct i40e_veb *veb = NULL;
8885 struct nlattr *attr, *br_spec;
8886 int i, rem;
8887
8888 /* Only for PF VSI for now */
8889 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8890 return -EOPNOTSUPP;
8891
8892 /* Find the HW bridge for PF VSI */
8893 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8894 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8895 veb = pf->veb[i];
8896 }
8897
8898 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8899
8900 nla_for_each_nested(attr, br_spec, rem) {
8901 __u16 mode;
8902
8903 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8904 continue;
8905
8906 mode = nla_get_u16(attr);
8907 if ((mode != BRIDGE_MODE_VEPA) &&
8908 (mode != BRIDGE_MODE_VEB))
8909 return -EINVAL;
8910
8911 /* Insert a new HW bridge */
8912 if (!veb) {
8913 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8914 vsi->tc_config.enabled_tc);
8915 if (veb) {
8916 veb->bridge_mode = mode;
8917 i40e_config_bridge_mode(veb);
8918 } else {
8919 /* No Bridge HW offload available */
8920 return -ENOENT;
8921 }
8922 break;
8923 } else if (mode != veb->bridge_mode) {
8924 /* Existing HW bridge but different mode needs reset */
8925 veb->bridge_mode = mode;
Anjali Singhai Jainfc608612015-05-08 15:35:57 -07008926 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
8927 if (mode == BRIDGE_MODE_VEB)
8928 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
8929 else
8930 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8931 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
Neerav Parikh51616012015-02-06 08:52:14 +00008932 break;
8933 }
8934 }
8935
8936 return 0;
8937}
8938
8939/**
8940 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
8941 * @skb: skb buff
8942 * @pid: process id
8943 * @seq: RTNL message seq #
8944 * @dev: the netdev being configured
8945 * @filter_mask: unused
Jesse Brandeburgd4b2f9f2015-09-03 17:18:48 -04008946 * @nlflags: netlink flags passed in
Neerav Parikh51616012015-02-06 08:52:14 +00008947 *
8948 * Return the mode in which the hardware bridge is operating in
8949 * i.e VEB or VEPA.
8950 **/
Neerav Parikh51616012015-02-06 08:52:14 +00008951static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8952 struct net_device *dev,
Carolyn Wyborny9f4ffc42015-08-31 19:54:42 -04008953 u32 __always_unused filter_mask,
8954 int nlflags)
Neerav Parikh51616012015-02-06 08:52:14 +00008955{
8956 struct i40e_netdev_priv *np = netdev_priv(dev);
8957 struct i40e_vsi *vsi = np->vsi;
8958 struct i40e_pf *pf = vsi->back;
8959 struct i40e_veb *veb = NULL;
8960 int i;
8961
8962 /* Only for PF VSI for now */
8963 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8964 return -EOPNOTSUPP;
8965
8966 /* Find the HW bridge for the PF VSI */
8967 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8968 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8969 veb = pf->veb[i];
8970 }
8971
8972 if (!veb)
8973 return 0;
8974
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02008975 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
Huaibin Wang599b0762016-09-26 09:51:18 +02008976 0, 0, nlflags, filter_mask, NULL);
Neerav Parikh51616012015-02-06 08:52:14 +00008977}
Neerav Parikh51616012015-02-06 08:52:14 +00008978
Singhai, Anjali6a899022015-12-14 12:21:18 -08008979/* Hardware supports L4 tunnel length of 128B (=2^7) which includes
8980 * inner mac plus all inner ethertypes.
8981 */
8982#define I40E_MAX_TUNNEL_HDR_LEN 128
Joe Stringerf44a75e2015-04-14 17:09:14 -07008983/**
8984 * i40e_features_check - Validate encapsulated packet conforms to limits
8985 * @skb: skb buff
Jean Sacren2bc11c62015-09-19 05:08:43 -06008986 * @dev: This physical port's netdev
Joe Stringerf44a75e2015-04-14 17:09:14 -07008987 * @features: Offload features that the stack believes apply
8988 **/
8989static netdev_features_t i40e_features_check(struct sk_buff *skb,
8990 struct net_device *dev,
8991 netdev_features_t features)
8992{
8993 if (skb->encapsulation &&
Singhai, Anjali6a899022015-12-14 12:21:18 -08008994 ((skb_inner_network_header(skb) - skb_transport_header(skb)) >
Joe Stringerf44a75e2015-04-14 17:09:14 -07008995 I40E_MAX_TUNNEL_HDR_LEN))
Tom Herberta1882222015-12-14 11:19:43 -08008996 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
Joe Stringerf44a75e2015-04-14 17:09:14 -07008997
8998 return features;
8999}
9000
Shannon Nelson37a29732015-02-27 09:15:19 +00009001static const struct net_device_ops i40e_netdev_ops = {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009002 .ndo_open = i40e_open,
9003 .ndo_stop = i40e_close,
9004 .ndo_start_xmit = i40e_lan_xmit_frame,
9005 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
9006 .ndo_set_rx_mode = i40e_set_rx_mode,
9007 .ndo_validate_addr = eth_validate_addr,
9008 .ndo_set_mac_address = i40e_set_mac,
9009 .ndo_change_mtu = i40e_change_mtu,
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00009010 .ndo_do_ioctl = i40e_ioctl,
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009011 .ndo_tx_timeout = i40e_tx_timeout,
9012 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
9013 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
9014#ifdef CONFIG_NET_POLL_CONTROLLER
9015 .ndo_poll_controller = i40e_netpoll,
9016#endif
John Fastabende4c67342016-02-16 21:16:15 -08009017 .ndo_setup_tc = __i40e_setup_tc,
Vasu Dev38e00432014-08-01 13:27:03 -07009018#ifdef I40E_FCOE
9019 .ndo_fcoe_enable = i40e_fcoe_enable,
9020 .ndo_fcoe_disable = i40e_fcoe_disable,
9021#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009022 .ndo_set_features = i40e_set_features,
9023 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
9024 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04009025 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009026 .ndo_get_vf_config = i40e_ndo_get_vf_config,
Mitch Williams588aefa2014-02-11 08:27:49 +00009027 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
Serey Konge6d90042014-07-12 07:28:14 +00009028 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
Anjali Singhai Jainc3bbbd22016-04-01 03:56:07 -07009029 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07009030 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
9031 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
Neerav Parikh1f224ad2014-02-12 01:45:31 +00009032 .ndo_get_phys_port_id = i40e_get_phys_port_id,
Greg Rose4ba0dea2014-03-06 08:59:55 +00009033 .ndo_fdb_add = i40e_ndo_fdb_add,
Joe Stringerf44a75e2015-04-14 17:09:14 -07009034 .ndo_features_check = i40e_features_check,
Neerav Parikh51616012015-02-06 08:52:14 +00009035 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
9036 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009037};
9038
9039/**
9040 * i40e_config_netdev - Setup the netdev flags
9041 * @vsi: the VSI being configured
9042 *
9043 * Returns 0 on success, negative value on failure
9044 **/
9045static int i40e_config_netdev(struct i40e_vsi *vsi)
9046{
9047 struct i40e_pf *pf = vsi->back;
9048 struct i40e_hw *hw = &pf->hw;
9049 struct i40e_netdev_priv *np;
9050 struct net_device *netdev;
9051 u8 mac_addr[ETH_ALEN];
9052 int etherdev_size;
9053
9054 etherdev_size = sizeof(struct i40e_netdev_priv);
Anjali Singhai Jainf8ff1462013-11-26 10:49:19 +00009055 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009056 if (!netdev)
9057 return -ENOMEM;
9058
9059 vsi->netdev = netdev;
9060 np = netdev_priv(netdev);
9061 np->vsi = vsi;
9062
Alexander Duyckb0fe3302016-04-02 00:05:14 -07009063 netdev->hw_enc_features |= NETIF_F_SG |
9064 NETIF_F_IP_CSUM |
9065 NETIF_F_IPV6_CSUM |
9066 NETIF_F_HIGHDMA |
9067 NETIF_F_SOFT_FEATURES |
9068 NETIF_F_TSO |
9069 NETIF_F_TSO_ECN |
9070 NETIF_F_TSO6 |
9071 NETIF_F_GSO_GRE |
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04009072 NETIF_F_GSO_GRE_CSUM |
Tom Herbert7e133182016-05-18 09:06:10 -07009073 NETIF_F_GSO_IPXIP4 |
Alexander Duyckbf2d1df2016-05-18 10:44:53 -07009074 NETIF_F_GSO_IPXIP6 |
Alexander Duyckb0fe3302016-04-02 00:05:14 -07009075 NETIF_F_GSO_UDP_TUNNEL |
9076 NETIF_F_GSO_UDP_TUNNEL_CSUM |
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04009077 NETIF_F_GSO_PARTIAL |
Alexander Duyckb0fe3302016-04-02 00:05:14 -07009078 NETIF_F_SCTP_CRC |
9079 NETIF_F_RXHASH |
9080 NETIF_F_RXCSUM |
Jesse Brandeburg5afdaaa2015-12-10 11:38:50 -08009081 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009082
Alexander Duyckb0fe3302016-04-02 00:05:14 -07009083 if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04009084 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
9085
9086 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
Alexander Duyckb0fe3302016-04-02 00:05:14 -07009087
9088 /* record features VLANs can make use of */
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04009089 netdev->vlan_features |= netdev->hw_enc_features |
9090 NETIF_F_TSO_MANGLEID;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009091
Anjali Singhai Jain2e86a0b2014-04-01 07:11:53 +00009092 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
Alexander Duyckb0fe3302016-04-02 00:05:14 -07009093 netdev->hw_features |= NETIF_F_NTUPLE;
Anjali Singhai Jain2e86a0b2014-04-01 07:11:53 +00009094
Alexander Duyckb0fe3302016-04-02 00:05:14 -07009095 netdev->hw_features |= netdev->hw_enc_features |
9096 NETIF_F_HW_VLAN_CTAG_TX |
9097 NETIF_F_HW_VLAN_CTAG_RX;
9098
9099 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04009100 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009101
9102 if (vsi->type == I40E_VSI_MAIN) {
9103 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
Greg Rose9a173902014-05-22 06:32:02 +00009104 ether_addr_copy(mac_addr, hw->mac.perm_addr);
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07009105 spin_lock_bh(&vsi->mac_filter_list_lock);
Jacob Keller1bc87e82016-10-05 09:30:31 -07009106 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07009107 spin_unlock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009108 } else {
9109 /* relate the VSI_VMDQ name to the VSI_MAIN name */
9110 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
9111 pf->vsi[pf->lan_vsi]->netdev->name);
9112 random_ether_addr(mac_addr);
Kiran Patil21659032015-09-30 14:09:03 -04009113
9114 spin_lock_bh(&vsi->mac_filter_list_lock);
Jacob Keller1bc87e82016-10-05 09:30:31 -07009115 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
Kiran Patil21659032015-09-30 14:09:03 -04009116 spin_unlock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009117 }
Kiran Patil21659032015-09-30 14:09:03 -04009118
Greg Rose9a173902014-05-22 06:32:02 +00009119 ether_addr_copy(netdev->dev_addr, mac_addr);
9120 ether_addr_copy(netdev->perm_addr, mac_addr);
Alexander Duyckb0fe3302016-04-02 00:05:14 -07009121
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009122 netdev->priv_flags |= IFF_UNICAST_FLT;
9123 netdev->priv_flags |= IFF_SUPP_NOFCS;
9124 /* Setup netdev TC information */
9125 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
9126
9127 netdev->netdev_ops = &i40e_netdev_ops;
9128 netdev->watchdog_timeo = 5 * HZ;
9129 i40e_set_ethtool_ops(netdev);
Vasu Dev38e00432014-08-01 13:27:03 -07009130#ifdef I40E_FCOE
9131 i40e_fcoe_config_netdev(netdev, vsi);
9132#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009133
Jarod Wilson91c527a2016-10-17 15:54:05 -04009134 /* MTU range: 68 - 9706 */
9135 netdev->min_mtu = ETH_MIN_MTU;
9136 netdev->max_mtu = I40E_MAX_RXBUFFER -
9137 (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9138
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009139 return 0;
9140}
9141
9142/**
9143 * i40e_vsi_delete - Delete a VSI from the switch
9144 * @vsi: the VSI being removed
9145 *
9146 * Returns 0 on success, negative value on failure
9147 **/
9148static void i40e_vsi_delete(struct i40e_vsi *vsi)
9149{
9150 /* remove default VSI is not allowed */
9151 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
9152 return;
9153
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009154 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009155}
9156
9157/**
Neerav Parikh51616012015-02-06 08:52:14 +00009158 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
9159 * @vsi: the VSI being queried
9160 *
9161 * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
9162 **/
9163int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
9164{
9165 struct i40e_veb *veb;
9166 struct i40e_pf *pf = vsi->back;
9167
9168 /* Uplink is not a bridge so default to VEB */
9169 if (vsi->veb_idx == I40E_NO_VEB)
9170 return 1;
9171
9172 veb = pf->veb[vsi->veb_idx];
Akeem G Abodunrin09603ea2015-10-01 14:37:36 -04009173 if (!veb) {
9174 dev_info(&pf->pdev->dev,
9175 "There is no veb associated with the bridge\n");
9176 return -ENOENT;
9177 }
Neerav Parikh51616012015-02-06 08:52:14 +00009178
Akeem G Abodunrin09603ea2015-10-01 14:37:36 -04009179 /* Uplink is a bridge in VEPA mode */
9180 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
9181 return 0;
9182 } else {
9183 /* Uplink is a bridge in VEB mode */
9184 return 1;
9185 }
9186
9187 /* VEPA is now default bridge, so return 0 */
9188 return 0;
Neerav Parikh51616012015-02-06 08:52:14 +00009189}
9190
9191/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009192 * i40e_add_vsi - Add a VSI to the switch
9193 * @vsi: the VSI being configured
9194 *
9195 * This initializes a VSI context depending on the VSI type to be added and
9196 * passes it down to the add_vsi aq command.
9197 **/
9198static int i40e_add_vsi(struct i40e_vsi *vsi)
9199{
9200 int ret = -ENODEV;
Kiran Patilf6bd0962016-06-20 09:10:34 -07009201 i40e_status aq_ret = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009202 struct i40e_pf *pf = vsi->back;
9203 struct i40e_hw *hw = &pf->hw;
9204 struct i40e_vsi_context ctxt;
Kiran Patil21659032015-09-30 14:09:03 -04009205 struct i40e_mac_filter *f, *ftmp;
9206
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009207 u8 enabled_tc = 0x1; /* TC0 enabled */
9208 int f_count = 0;
9209
9210 memset(&ctxt, 0, sizeof(ctxt));
9211 switch (vsi->type) {
9212 case I40E_VSI_MAIN:
9213 /* The PF's main VSI is already setup as part of the
9214 * device initialization, so we'll not bother with
9215 * the add_vsi call, but we will retrieve the current
9216 * VSI context.
9217 */
9218 ctxt.seid = pf->main_vsi_seid;
9219 ctxt.pf_num = pf->hw.pf_id;
9220 ctxt.vf_num = 0;
9221 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9222 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9223 if (ret) {
9224 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04009225 "couldn't get PF vsi config, err %s aq_err %s\n",
9226 i40e_stat_str(&pf->hw, ret),
9227 i40e_aq_str(&pf->hw,
9228 pf->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009229 return -ENOENT;
9230 }
Jesse Brandeburg1a2f6242015-03-31 00:45:01 -07009231 vsi->info = ctxt.info;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009232 vsi->info.valid_sections = 0;
9233
9234 vsi->seid = ctxt.seid;
9235 vsi->id = ctxt.vsi_number;
9236
9237 enabled_tc = i40e_pf_get_tc_map(pf);
9238
9239 /* MFP mode setup queue map and update VSI */
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00009240 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
9241 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009242 memset(&ctxt, 0, sizeof(ctxt));
9243 ctxt.seid = pf->main_vsi_seid;
9244 ctxt.pf_num = pf->hw.pf_id;
9245 ctxt.vf_num = 0;
9246 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
9247 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
9248 if (ret) {
9249 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04009250 "update vsi failed, err %s aq_err %s\n",
9251 i40e_stat_str(&pf->hw, ret),
9252 i40e_aq_str(&pf->hw,
9253 pf->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009254 ret = -ENOENT;
9255 goto err;
9256 }
9257 /* update the local VSI info queue map */
9258 i40e_vsi_update_queue_map(vsi, &ctxt);
9259 vsi->info.valid_sections = 0;
9260 } else {
9261 /* Default/Main VSI is only enabled for TC0
9262 * reconfigure it to enable all TCs that are
9263 * available on the port in SFP mode.
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00009264 * For MFP case the iSCSI PF would use this
9265 * flow to enable LAN+iSCSI TC.
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009266 */
9267 ret = i40e_vsi_config_tc(vsi, enabled_tc);
9268 if (ret) {
9269 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04009270 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
9271 enabled_tc,
9272 i40e_stat_str(&pf->hw, ret),
9273 i40e_aq_str(&pf->hw,
9274 pf->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009275 ret = -ENOENT;
9276 }
9277 }
9278 break;
9279
9280 case I40E_VSI_FDIR:
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08009281 ctxt.pf_num = hw->pf_id;
9282 ctxt.vf_num = 0;
9283 ctxt.uplink_seid = vsi->uplink_seid;
Neerav Parikh2b18e592015-01-24 09:58:38 +00009284 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08009285 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
Anjali Singhai Jainfc608612015-05-08 15:35:57 -07009286 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
9287 (i40e_is_vsi_uplink_mode_veb(vsi))) {
Neerav Parikh51616012015-02-06 08:52:14 +00009288 ctxt.info.valid_sections |=
Anjali Singhai Jainfc608612015-05-08 15:35:57 -07009289 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
Neerav Parikh51616012015-02-06 08:52:14 +00009290 ctxt.info.switch_id =
Anjali Singhai Jainfc608612015-05-08 15:35:57 -07009291 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
Neerav Parikh51616012015-02-06 08:52:14 +00009292 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009293 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009294 break;
9295
9296 case I40E_VSI_VMDQ2:
9297 ctxt.pf_num = hw->pf_id;
9298 ctxt.vf_num = 0;
9299 ctxt.uplink_seid = vsi->uplink_seid;
Neerav Parikh2b18e592015-01-24 09:58:38 +00009300 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009301 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
9302
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009303 /* This VSI is connected to VEB so the switch_id
9304 * should be set to zero by default.
9305 */
Neerav Parikh51616012015-02-06 08:52:14 +00009306 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9307 ctxt.info.valid_sections |=
9308 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9309 ctxt.info.switch_id =
9310 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9311 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009312
9313 /* Setup the VSI tx/rx queue map for TC0 only for now */
9314 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9315 break;
9316
9317 case I40E_VSI_SRIOV:
9318 ctxt.pf_num = hw->pf_id;
9319 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
9320 ctxt.uplink_seid = vsi->uplink_seid;
Neerav Parikh2b18e592015-01-24 09:58:38 +00009321 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009322 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
9323
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009324 /* This VSI is connected to VEB so the switch_id
9325 * should be set to zero by default.
9326 */
Neerav Parikh51616012015-02-06 08:52:14 +00009327 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9328 ctxt.info.valid_sections |=
9329 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9330 ctxt.info.switch_id =
9331 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9332 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009333
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06009334 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
9335 ctxt.info.valid_sections |=
9336 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
9337 ctxt.info.queueing_opt_flags |=
Ashish Shah4b28cdb2016-05-03 15:13:17 -07009338 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
9339 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06009340 }
9341
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009342 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
9343 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
Mitch Williamsc674d122014-05-20 08:01:40 +00009344 if (pf->vf[vsi->vf_id].spoofchk) {
9345 ctxt.info.valid_sections |=
9346 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
9347 ctxt.info.sec_flags |=
9348 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
9349 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
9350 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009351 /* Setup the VSI tx/rx queue map for TC0 only for now */
9352 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9353 break;
9354
Vasu Dev38e00432014-08-01 13:27:03 -07009355#ifdef I40E_FCOE
9356 case I40E_VSI_FCOE:
9357 ret = i40e_fcoe_vsi_init(vsi, &ctxt);
9358 if (ret) {
9359 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
9360 return ret;
9361 }
9362 break;
9363
9364#endif /* I40E_FCOE */
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06009365 case I40E_VSI_IWARP:
9366 /* send down message to iWARP */
9367 break;
9368
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009369 default:
9370 return -ENODEV;
9371 }
9372
9373 if (vsi->type != I40E_VSI_MAIN) {
9374 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
9375 if (ret) {
9376 dev_info(&vsi->back->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04009377 "add vsi failed, err %s aq_err %s\n",
9378 i40e_stat_str(&pf->hw, ret),
9379 i40e_aq_str(&pf->hw,
9380 pf->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009381 ret = -ENOENT;
9382 goto err;
9383 }
Jesse Brandeburg1a2f6242015-03-31 00:45:01 -07009384 vsi->info = ctxt.info;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009385 vsi->info.valid_sections = 0;
9386 vsi->seid = ctxt.seid;
9387 vsi->id = ctxt.vsi_number;
9388 }
Kiran Patilf6bd0962016-06-20 09:10:34 -07009389 /* Except FDIR VSI, for all othet VSI set the broadcast filter */
9390 if (vsi->type != I40E_VSI_FDIR) {
9391 aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
9392 if (aq_ret) {
9393 ret = i40e_aq_rc_to_posix(aq_ret,
9394 hw->aq.asq_last_status);
9395 dev_info(&pf->pdev->dev,
9396 "set brdcast promisc failed, err %s, aq_err %s\n",
9397 i40e_stat_str(hw, aq_ret),
9398 i40e_aq_str(hw, hw->aq.asq_last_status));
9399 }
9400 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009401
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07009402 vsi->active_filters = 0;
9403 clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
Kiran Patil21659032015-09-30 14:09:03 -04009404 spin_lock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009405 /* If macvlan filters already exist, force them to get loaded */
9406 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07009407 f->state = I40E_FILTER_NEW;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009408 f_count++;
9409 }
Kiran Patil21659032015-09-30 14:09:03 -04009410 spin_unlock_bh(&vsi->mac_filter_list_lock);
9411
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009412 if (f_count) {
9413 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
9414 pf->flags |= I40E_FLAG_FILTER_SYNC;
9415 }
9416
9417 /* Update VSI BW information */
9418 ret = i40e_vsi_get_bw_info(vsi);
9419 if (ret) {
9420 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04009421 "couldn't get vsi bw info, err %s aq_err %s\n",
9422 i40e_stat_str(&pf->hw, ret),
9423 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009424 /* VSI is already added so not tearing that up */
9425 ret = 0;
9426 }
9427
9428err:
9429 return ret;
9430}
9431
9432/**
9433 * i40e_vsi_release - Delete a VSI and free its resources
9434 * @vsi: the VSI being removed
9435 *
9436 * Returns 0 on success or < 0 on error
9437 **/
9438int i40e_vsi_release(struct i40e_vsi *vsi)
9439{
9440 struct i40e_mac_filter *f, *ftmp;
9441 struct i40e_veb *veb = NULL;
9442 struct i40e_pf *pf;
9443 u16 uplink_seid;
9444 int i, n;
9445
9446 pf = vsi->back;
9447
9448 /* release of a VEB-owner or last VSI is not allowed */
9449 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
9450 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
9451 vsi->seid, vsi->uplink_seid);
9452 return -ENODEV;
9453 }
9454 if (vsi == pf->vsi[pf->lan_vsi] &&
9455 !test_bit(__I40E_DOWN, &pf->state)) {
9456 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
9457 return -ENODEV;
9458 }
9459
9460 uplink_seid = vsi->uplink_seid;
9461 if (vsi->type != I40E_VSI_SRIOV) {
9462 if (vsi->netdev_registered) {
9463 vsi->netdev_registered = false;
9464 if (vsi->netdev) {
9465 /* results in a call to i40e_close() */
9466 unregister_netdev(vsi->netdev);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009467 }
9468 } else {
Shannon Nelson90ef8d42014-03-14 07:32:26 +00009469 i40e_vsi_close(vsi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009470 }
9471 i40e_vsi_disable_irq(vsi);
9472 }
9473
Kiran Patil21659032015-09-30 14:09:03 -04009474 spin_lock_bh(&vsi->mac_filter_list_lock);
Jacob Keller6622f5c2016-10-05 09:30:32 -07009475
9476 /* clear the sync flag on all filters */
9477 if (vsi->netdev) {
9478 __dev_uc_unsync(vsi->netdev, NULL);
9479 __dev_mc_unsync(vsi->netdev, NULL);
9480 }
9481
9482 /* make sure any remaining filters are marked for deletion */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009483 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
Jacob Keller290d2552016-10-05 09:30:36 -07009484 __i40e_del_filter(vsi, f);
Jacob Keller6622f5c2016-10-05 09:30:32 -07009485
Kiran Patil21659032015-09-30 14:09:03 -04009486 spin_unlock_bh(&vsi->mac_filter_list_lock);
9487
Jesse Brandeburg17652c62015-11-05 17:01:02 -08009488 i40e_sync_vsi_filters(vsi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009489
9490 i40e_vsi_delete(vsi);
9491 i40e_vsi_free_q_vectors(vsi);
Shannon Nelsona4866592014-02-11 08:24:07 +00009492 if (vsi->netdev) {
9493 free_netdev(vsi->netdev);
9494 vsi->netdev = NULL;
9495 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009496 i40e_vsi_clear_rings(vsi);
9497 i40e_vsi_clear(vsi);
9498
9499 /* If this was the last thing on the VEB, except for the
9500 * controlling VSI, remove the VEB, which puts the controlling
9501 * VSI onto the next level down in the switch.
9502 *
9503 * Well, okay, there's one more exception here: don't remove
9504 * the orphan VEBs yet. We'll wait for an explicit remove request
9505 * from up the network stack.
9506 */
Mitch Williams505682c2014-05-20 08:01:37 +00009507 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009508 if (pf->vsi[i] &&
9509 pf->vsi[i]->uplink_seid == uplink_seid &&
9510 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
9511 n++; /* count the VSIs */
9512 }
9513 }
9514 for (i = 0; i < I40E_MAX_VEB; i++) {
9515 if (!pf->veb[i])
9516 continue;
9517 if (pf->veb[i]->uplink_seid == uplink_seid)
9518 n++; /* count the VEBs */
9519 if (pf->veb[i]->seid == uplink_seid)
9520 veb = pf->veb[i];
9521 }
9522 if (n == 0 && veb && veb->uplink_seid != 0)
9523 i40e_veb_release(veb);
9524
9525 return 0;
9526}
9527
9528/**
9529 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
9530 * @vsi: ptr to the VSI
9531 *
9532 * This should only be called after i40e_vsi_mem_alloc() which allocates the
9533 * corresponding SW VSI structure and initializes num_queue_pairs for the
9534 * newly allocated VSI.
9535 *
9536 * Returns 0 on success or negative on failure
9537 **/
9538static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
9539{
9540 int ret = -ENOENT;
9541 struct i40e_pf *pf = vsi->back;
9542
Alexander Duyck493fb302013-09-28 07:01:44 +00009543 if (vsi->q_vectors[0]) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009544 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
9545 vsi->seid);
9546 return -EEXIST;
9547 }
9548
9549 if (vsi->base_vector) {
Jesse Brandeburgf29eaa32014-02-11 08:24:12 +00009550 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009551 vsi->seid, vsi->base_vector);
9552 return -EEXIST;
9553 }
9554
Greg Rose90e04072014-03-06 08:59:57 +00009555 ret = i40e_vsi_alloc_q_vectors(vsi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009556 if (ret) {
9557 dev_info(&pf->pdev->dev,
9558 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
9559 vsi->num_q_vectors, vsi->seid, ret);
9560 vsi->num_q_vectors = 0;
9561 goto vector_setup_out;
9562 }
9563
Anjali Singhai Jain26cdc442015-07-10 19:36:00 -04009564 /* In Legacy mode, we do not have to get any other vector since we
9565 * piggyback on the misc/ICR0 for queue interrupts.
9566 */
9567 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
9568 return ret;
Shannon Nelson958a3e32013-09-28 07:13:28 +00009569 if (vsi->num_q_vectors)
9570 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
9571 vsi->num_q_vectors, vsi->idx);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009572 if (vsi->base_vector < 0) {
9573 dev_info(&pf->pdev->dev,
Shannon Nelson049a2be2014-10-17 03:14:50 +00009574 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
9575 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009576 i40e_vsi_free_q_vectors(vsi);
9577 ret = -ENOENT;
9578 goto vector_setup_out;
9579 }
9580
9581vector_setup_out:
9582 return ret;
9583}
9584
9585/**
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00009586 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
9587 * @vsi: pointer to the vsi.
9588 *
9589 * This re-allocates a vsi's queue resources.
9590 *
9591 * Returns pointer to the successfully allocated and configured VSI sw struct
9592 * on success, otherwise returns NULL on failure.
9593 **/
9594static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
9595{
John Underwoodf5340392016-02-18 09:19:24 -08009596 struct i40e_pf *pf;
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00009597 u8 enabled_tc;
9598 int ret;
9599
John Underwoodf5340392016-02-18 09:19:24 -08009600 if (!vsi)
9601 return NULL;
9602
9603 pf = vsi->back;
9604
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00009605 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
9606 i40e_vsi_clear_rings(vsi);
9607
9608 i40e_vsi_free_arrays(vsi, false);
9609 i40e_set_num_rings_in_vsi(vsi);
9610 ret = i40e_vsi_alloc_arrays(vsi, false);
9611 if (ret)
9612 goto err_vsi;
9613
9614 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
9615 if (ret < 0) {
Shannon Nelson049a2be2014-10-17 03:14:50 +00009616 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04009617 "failed to get tracking for %d queues for VSI %d err %d\n",
Shannon Nelson049a2be2014-10-17 03:14:50 +00009618 vsi->alloc_queue_pairs, vsi->seid, ret);
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00009619 goto err_vsi;
9620 }
9621 vsi->base_queue = ret;
9622
9623 /* Update the FW view of the VSI. Force a reset of TC and queue
9624 * layout configurations.
9625 */
9626 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
9627 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
9628 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
9629 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
9630
9631 /* assign it some queues */
9632 ret = i40e_alloc_rings(vsi);
9633 if (ret)
9634 goto err_rings;
9635
9636 /* map all of the rings to the q_vectors */
9637 i40e_vsi_map_rings_to_vectors(vsi);
9638 return vsi;
9639
9640err_rings:
9641 i40e_vsi_free_q_vectors(vsi);
9642 if (vsi->netdev_registered) {
9643 vsi->netdev_registered = false;
9644 unregister_netdev(vsi->netdev);
9645 free_netdev(vsi->netdev);
9646 vsi->netdev = NULL;
9647 }
9648 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9649err_vsi:
9650 i40e_vsi_clear(vsi);
9651 return NULL;
9652}
9653
9654/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009655 * i40e_vsi_setup - Set up a VSI by a given type
9656 * @pf: board private structure
9657 * @type: VSI type
9658 * @uplink_seid: the switch element to link to
9659 * @param1: usage depends upon VSI type. For VF types, indicates VF id
9660 *
9661 * This allocates the sw VSI structure and its queue resources, then add a VSI
9662 * to the identified VEB.
9663 *
9664 * Returns pointer to the successfully allocated and configure VSI sw struct on
9665 * success, otherwise returns NULL on failure.
9666 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {
		/* uplink is neither a known VEB nor the port itself, so it
		 * must identify an existing VSI; find it by seid
		 */
		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		/* create a new VEB under the uplink VSI unless that VSI
		 * already owns one
		 */
		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			/* a freshly created VEB must hang off the LAN VSI
			 * (see the error message below)
			 */
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
				return NULL;
			}
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 */
			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
				veb->bridge_mode = BRIDGE_MODE_VEPA;
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			}
			i40e_config_bridge_mode(veb);
		}
		/* if no VEB was created above, the uplink VSI should already
		 * own one; look it up via the VSI's (possibly updated)
		 * uplink_seid
		 */
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;	/* param1 carries the VF id for SRIOV VSIs */
	/* assign it some queues */
	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
				vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
		/* Apply relevant filters if a platform-specific mac
		 * address was selected.
		 */
		if (!!(pf->flags & I40E_FLAG_PF_MAC)) {
			ret = i40e_macaddr_init(vsi, pf->hw.mac.addr);
			if (ret) {
				dev_warn(&pf->pdev->dev,
					 "could not set up macaddr; err %d\n",
					 ret);
			}
		}
		/* fall through: MAIN also gets the netdev setup below */
	case I40E_VSI_VMDQ2:
	case I40E_VSI_FCOE:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
	    (vsi->type == I40E_VSI_VMDQ2)) {
		/* NOTE(review): ret is assigned but never checked here, so an
		 * RSS config failure on a VMDQ2 VSI is silently ignored
		 */
		ret = i40e_vsi_config_rss(vsi);
	}
	return vsi;

	/* error unwind: each label undoes the steps completed before the
	 * failure, in reverse order
	 */
err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}
9848
9849/**
9850 * i40e_veb_get_bw_info - Query VEB BW information
9851 * @veb: the veb to query
9852 *
9853 * Query the Tx scheduler BW configuration data for given VEB
9854 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	/* per-TC share/limit credits for this switching component */
	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	/* ETS configuration: port BW limit, max quanta, valid TC bits */
	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	veb->enabled_tc = ets_data.tc_valid_bits;
	/* tc_bw_max arrives as two LE 16-bit words; combine them so the
	 * per-TC 3-bit max-quanta fields can be extracted at 4-bit strides
	 */
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
			le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
	}

out:
	return ret;
}
9901
9902/**
9903 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
9904 * @pf: board private structure
9905 *
9906 * On error: returns error code (negative)
 * On success: returns veb index in PF (positive)
9908 **/
9909static int i40e_veb_mem_alloc(struct i40e_pf *pf)
9910{
9911 int ret = -ENOENT;
9912 struct i40e_veb *veb;
9913 int i;
9914
9915 /* Need to protect the allocation of switch elements at the PF level */
9916 mutex_lock(&pf->switch_mutex);
9917
9918 /* VEB list may be fragmented if VEB creation/destruction has
9919 * been happening. We can afford to do a quick scan to look
9920 * for any free slots in the list.
9921 *
9922 * find next empty veb slot, looping back around if necessary
9923 */
9924 i = 0;
9925 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
9926 i++;
9927 if (i >= I40E_MAX_VEB) {
9928 ret = -ENOMEM;
9929 goto err_alloc_veb; /* out of VEB slots! */
9930 }
9931
9932 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
9933 if (!veb) {
9934 ret = -ENOMEM;
9935 goto err_alloc_veb;
9936 }
9937 veb->pf = pf;
9938 veb->idx = i;
9939 veb->enabled_tc = 1;
9940
9941 pf->veb[i] = veb;
9942 ret = i;
9943err_alloc_veb:
9944 mutex_unlock(&pf->switch_mutex);
9945 return ret;
9946}
9947
9948/**
9949 * i40e_switch_branch_release - Delete a branch of the switch tree
9950 * @branch: where to start deleting
9951 *
9952 * This uses recursion to find the tips of the branch to be
9953 * removed, deleting until we get back to and can delete this VEB.
9954 **/
9955static void i40e_switch_branch_release(struct i40e_veb *branch)
9956{
9957 struct i40e_pf *pf = branch->pf;
9958 u16 branch_seid = branch->seid;
9959 u16 veb_idx = branch->idx;
9960 int i;
9961
9962 /* release any VEBs on this VEB - RECURSION */
9963 for (i = 0; i < I40E_MAX_VEB; i++) {
9964 if (!pf->veb[i])
9965 continue;
9966 if (pf->veb[i]->uplink_seid == branch->seid)
9967 i40e_switch_branch_release(pf->veb[i]);
9968 }
9969
9970 /* Release the VSIs on this VEB, but not the owner VSI.
9971 *
9972 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
9973 * the VEB itself, so don't use (*branch) after this loop.
9974 */
Mitch Williams505682c2014-05-20 08:01:37 +00009975 for (i = 0; i < pf->num_alloc_vsi; i++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009976 if (!pf->vsi[i])
9977 continue;
9978 if (pf->vsi[i]->uplink_seid == branch_seid &&
9979 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
9980 i40e_vsi_release(pf->vsi[i]);
9981 }
9982 }
9983
9984 /* There's one corner case where the VEB might not have been
9985 * removed, so double check it here and remove it if needed.
9986 * This case happens if the veb was created from the debugfs
9987 * commands and no VSIs were added to it.
9988 */
9989 if (pf->veb[veb_idx])
9990 i40e_veb_release(pf->veb[veb_idx]);
9991}
9992
9993/**
9994 * i40e_veb_clear - remove veb struct
9995 * @veb: the veb to remove
9996 **/
9997static void i40e_veb_clear(struct i40e_veb *veb)
9998{
9999 if (!veb)
10000 return;
10001
10002 if (veb->pf) {
10003 struct i40e_pf *pf = veb->pf;
10004
10005 mutex_lock(&pf->switch_mutex);
10006 if (pf->veb[veb->idx] == veb)
10007 pf->veb[veb->idx] = NULL;
10008 mutex_unlock(&pf->switch_mutex);
10009 }
10010
10011 kfree(veb);
10012}
10013
10014/**
10015 * i40e_veb_release - Delete a VEB and free its resources
10016 * @veb: the VEB being removed
10017 **/
10018void i40e_veb_release(struct i40e_veb *veb)
10019{
10020 struct i40e_vsi *vsi = NULL;
10021 struct i40e_pf *pf;
10022 int i, n = 0;
10023
10024 pf = veb->pf;
10025
10026 /* find the remaining VSI and check for extras */
Mitch Williams505682c2014-05-20 08:01:37 +000010027 for (i = 0; i < pf->num_alloc_vsi; i++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010028 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
10029 n++;
10030 vsi = pf->vsi[i];
10031 }
10032 }
10033 if (n != 1) {
10034 dev_info(&pf->pdev->dev,
10035 "can't remove VEB %d with %d VSIs left\n",
10036 veb->seid, n);
10037 return;
10038 }
10039
10040 /* move the remaining VSI to uplink veb */
10041 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
10042 if (veb->uplink_seid) {
10043 vsi->uplink_seid = veb->uplink_seid;
10044 if (veb->uplink_seid == pf->mac_seid)
10045 vsi->veb_idx = I40E_NO_VEB;
10046 else
10047 vsi->veb_idx = veb->veb_idx;
10048 } else {
10049 /* floating VEB */
10050 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10051 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
10052 }
10053
10054 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10055 i40e_veb_clear(veb);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010056}
10057
10058/**
10059 * i40e_add_veb - create the VEB in the switch
10060 * @veb: the VEB to be instantiated
10061 * @vsi: the controlling VSI
10062 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = veb->pf;
	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
	int ret;

	/* get a VEB from the hardware */
	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, false,
			      &veb->seid, enable_stats, NULL);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't add VEB, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* undo the HW VEB created above before bailing out */
		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	/* link the controlling VSI under the newly created VEB */
	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}
10108
10109/**
10110 * i40e_veb_setup - Set up a VEB
10111 * @pf: board private structure
10112 * @flags: VEB setup flags
10113 * @uplink_seid: the switch element to link to
10114 * @vsi_seid: the initial VSI seid
10115 * @enabled_tc: Enabled TC bit-map
10116 *
10117 * This allocates the sw VEB structure and links it into the switch
10118 * It is possible and legal for this to be a duplicate of an already
10119 * existing VEB. It is also possible for both uplink and vsi seids
10120 * to be zero, in order to create a floating VEB.
10121 *
10122 * Returns pointer to the successfully allocated VEB sw struct on
10123 * success, otherwise returns NULL on failure.
10124 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	/* vsi_seid == 0 (floating VEB) is allowed to fall through here.
	 * NOTE(review): in that case vsi_idx == pf->num_alloc_vsi and
	 * pf->vsi[vsi_idx] in the i40e_add_veb() call below indexes one past
	 * the allocated VSI array — confirm how floating-VEB callers avoid
	 * this.
	 */
	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	/* a non-port uplink must name an already-known VEB */
	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);	/* TC0 at minimum */

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	/* remember the VEB that carries the LAN VSI */
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}
10191
10192/**
Jeff Kirsherb40c82e62015-02-27 09:18:34 +000010193 * i40e_setup_pf_switch_element - set PF vars based on switch type
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010194 * @pf: board private structure
10195 * @ele: element we are building info from
10196 * @num_reported: total number of elements
10197 * @printconfig: should we print the contents
10198 *
10199 * helper function to assist in extracting a few useful SEID values.
10200 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		/* the port itself */
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? (only a VEB directly on the port is tracked) */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		/* record what firmware reported for the LAN VEB */
		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}
10274
10275/**
10276 * i40e_fetch_switch_configuration - Get switch config from firmware
10277 * @pf: board private structure
10278 * @printconfig: should we print the contents
10279 *
10280 * Get the current switch configuration from the device and
10281 * extract a few useful SEID values.
10282 **/
10283int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
10284{
10285 struct i40e_aqc_get_switch_config_resp *sw_config;
10286 u16 next_seid = 0;
10287 int ret = 0;
10288 u8 *aq_buf;
10289 int i;
10290
10291 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
10292 if (!aq_buf)
10293 return -ENOMEM;
10294
10295 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
10296 do {
10297 u16 num_reported, num_total;
10298
10299 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
10300 I40E_AQ_LARGE_BUF,
10301 &next_seid, NULL);
10302 if (ret) {
10303 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -040010304 "get switch config failed err %s aq_err %s\n",
10305 i40e_stat_str(&pf->hw, ret),
10306 i40e_aq_str(&pf->hw,
10307 pf->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010308 kfree(aq_buf);
10309 return -ENOENT;
10310 }
10311
10312 num_reported = le16_to_cpu(sw_config->header.num_reported);
10313 num_total = le16_to_cpu(sw_config->header.num_total);
10314
10315 if (printconfig)
10316 dev_info(&pf->pdev->dev,
10317 "header: %d reported %d total\n",
10318 num_reported, num_total);
10319
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010320 for (i = 0; i < num_reported; i++) {
10321 struct i40e_aqc_switch_config_element_resp *ele =
10322 &sw_config->element[i];
10323
10324 i40e_setup_pf_switch_element(pf, ele, num_reported,
10325 printconfig);
10326 }
10327 } while (next_seid != 0);
10328
10329 kfree(aq_buf);
10330 return ret;
10331}
10332
10333/**
10334 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
10335 * @pf: board private structure
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +000010336 * @reinit: if the Main VSI needs to re-initialized.
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010337 *
10338 * Returns 0 on success, negative value on failure
10339 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	u16 flags = 0;
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
	 */

	if ((pf->hw.pf_id == 0) &&
	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;

	/* only PF 0 may set the device-wide switch config */
	if (pf->hw.pf_id == 0) {
		u16 valid_flags;

		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
						NULL);
		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
			dev_info(&pf->pdev->dev,
				 "couldn't set switch config bits, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
		}
	}

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	/* set up the sideband Flow Director VSI if enabled */
	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_pf_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_update_link_info(&pf->hw);
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	return ret;
}
10441
10442/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010443 * i40e_determine_queue_usage - Work out queue distribution
10444 * @pf: board private structure
10445 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;

	pf->num_lan_qps = 0;
#ifdef I40E_FCOE
	pf->num_fcoe_qps = 0;
#endif

	/* Find the max queues to be put into basic use. We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big RSS set.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
			       I40E_FLAG_IWARP_ENABLED |
#ifdef I40E_FCOE
			       I40E_FLAG_FCOE_ENABLED |
#endif
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_CAPABLE |
			       I40E_FLAG_DCB_ENABLED |
			       I40E_FLAG_SRIOV_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* no advanced features requested: one qp for PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
			       I40E_FLAG_IWARP_ENABLED |
#ifdef I40E_FCOE
			       I40E_FLAG_FCOE_ENABLED |
#endif
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
					I40E_FLAG_DCB_ENABLED);
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}
		/* size LAN queues to the CPU count, capped by HW capability */
		pf->num_lan_qps = max_t(int, pf->rss_size_max,
					num_online_cpus());
		pf->num_lan_qps = min_t(int, pf->num_lan_qps,
					pf->hw.func_caps.num_tx_qp);

		queues_left -= pf->num_lan_qps;
	}

#ifdef I40E_FCOE
	/* give FCoE its preferred queue count, else the minimum, else none */
	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
		if (I40E_DEFAULT_FCOE <= queues_left) {
			pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
		} else if (I40E_MINIMUM_FCOE <= queues_left) {
			pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
		} else {
			pf->num_fcoe_qps = 0;
			pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
			dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
		}

		queues_left -= pf->num_fcoe_qps;
	}

#endif
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	/* shrink the requested VF count to what the remaining queues allow */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	/* likewise for VMDq VSIs */
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
#ifdef I40E_FCOE
	dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
#endif
}
10563
10564/**
10565 * i40e_setup_pf_filter_control - Setup PF static filter control
10566 * @pf: PF to be setup
10567 *
Jeff Kirsherb40c82e62015-02-27 09:18:34 +000010568 * i40e_setup_pf_filter_control sets up a PF's initial filter control
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010569 * settings. If PE/FCoE are enabled then it will also set the per PF
10570 * based filter sizes required for them. It also enables Flow director,
10571 * ethertype and macvlan type filter settings for the pf.
10572 *
10573 * Returns 0 on success, negative on failure
10574 **/
10575static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
10576{
10577 struct i40e_filter_control_settings *settings = &pf->filter_settings;
10578
10579 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
10580
10581 /* Flow Director is enabled */
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -080010582 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010583 settings->enable_fdir = true;
10584
10585 /* Ethtype and MACVLAN filters enabled for PF */
10586 settings->enable_ethtype = true;
10587 settings->enable_macvlan = true;
10588
10589 if (i40e_set_filter_control(&pf->hw, settings))
10590 return -ENOENT;
10591
10592 return 0;
10593}
10594
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010595#define INFO_STRING_LEN 255
Shannon Nelson7fd89542015-10-21 19:47:04 -040010596#define REMAIN(__x) (INFO_STRING_LEN - (__x))
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010597static void i40e_print_features(struct i40e_pf *pf)
10598{
10599 struct i40e_hw *hw = &pf->hw;
Joe Perches3b195842015-12-03 04:20:57 -080010600 char *buf;
10601 int i;
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010602
Joe Perches3b195842015-12-03 04:20:57 -080010603 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
10604 if (!buf)
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010605 return;
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010606
Joe Perches3b195842015-12-03 04:20:57 -080010607 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010608#ifdef CONFIG_PCI_IOV
Joe Perches3b195842015-12-03 04:20:57 -080010609 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010610#endif
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -070010611 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
Shannon Nelson7fd89542015-10-21 19:47:04 -040010612 pf->hw.func_caps.num_vsis,
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -070010613 pf->vsi[pf->lan_vsi]->num_queue_pairs);
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010614 if (pf->flags & I40E_FLAG_RSS_ENABLED)
Joe Perches3b195842015-12-03 04:20:57 -080010615 i += snprintf(&buf[i], REMAIN(i), " RSS");
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010616 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
Joe Perches3b195842015-12-03 04:20:57 -080010617 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
Akeem G Abodunrinc6423ff2014-05-10 04:49:08 +000010618 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
Joe Perches3b195842015-12-03 04:20:57 -080010619 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
10620 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
Akeem G Abodunrinc6423ff2014-05-10 04:49:08 +000010621 }
Neerav Parikh4d9b6042014-05-22 06:31:51 +000010622 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
Joe Perches3b195842015-12-03 04:20:57 -080010623 i += snprintf(&buf[i], REMAIN(i), " DCB");
Joe Perches3b195842015-12-03 04:20:57 -080010624 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
Singhai, Anjali6a899022015-12-14 12:21:18 -080010625 i += snprintf(&buf[i], REMAIN(i), " Geneve");
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010626 if (pf->flags & I40E_FLAG_PTP)
Joe Perches3b195842015-12-03 04:20:57 -080010627 i += snprintf(&buf[i], REMAIN(i), " PTP");
Vasu Dev38e00432014-08-01 13:27:03 -070010628#ifdef I40E_FCOE
10629 if (pf->flags & I40E_FLAG_FCOE_ENABLED)
Joe Perches3b195842015-12-03 04:20:57 -080010630 i += snprintf(&buf[i], REMAIN(i), " FCOE");
Vasu Dev38e00432014-08-01 13:27:03 -070010631#endif
Shannon Nelson6dec1012015-09-28 14:12:30 -040010632 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
Joe Perches3b195842015-12-03 04:20:57 -080010633 i += snprintf(&buf[i], REMAIN(i), " VEB");
Shannon Nelson6dec1012015-09-28 14:12:30 -040010634 else
Joe Perches3b195842015-12-03 04:20:57 -080010635 i += snprintf(&buf[i], REMAIN(i), " VEPA");
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010636
Joe Perches3b195842015-12-03 04:20:57 -080010637 dev_info(&pf->pdev->dev, "%s\n", buf);
10638 kfree(buf);
Shannon Nelson7fd89542015-10-21 19:47:04 -040010639 WARN_ON(i > INFO_STRING_LEN);
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010640}
10641
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010642/**
Sowmini Varadhanb499ffb2015-12-07 15:06:34 -050010643 * i40e_get_platform_mac_addr - get platform-specific MAC address
10644 *
10645 * @pdev: PCI device information struct
10646 * @pf: board private structure
10647 *
10648 * Look up the MAC address in Open Firmware on systems that support it,
10649 * and use IDPROM on SPARC if no OF address is found. On return, the
10650 * I40E_FLAG_PF_MAC will be wset in pf->flags if a platform-specific value
10651 * has been selected.
10652 **/
10653static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
10654{
Sowmini Varadhanb499ffb2015-12-07 15:06:34 -050010655 pf->flags &= ~I40E_FLAG_PF_MAC;
Sowmini Varadhanba942722016-01-12 19:32:31 -080010656 if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
Sowmini Varadhanb499ffb2015-12-07 15:06:34 -050010657 pf->flags |= I40E_FLAG_PF_MAC;
Sowmini Varadhanb499ffb2015-12-07 15:06:34 -050010658}
10659
10660/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010661 * i40e_probe - Device initialization routine
10662 * @pdev: PCI device information struct
10663 * @ent: entry in i40e_pci_tbl
10664 *
Jeff Kirsherb40c82e62015-02-27 09:18:34 +000010665 * i40e_probe initializes a PF identified by a pci_dev structure.
10666 * The OS initialization, configuring of the PF private structure,
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010667 * and a hardware reset occur.
10668 *
10669 * Returns 0 on success, negative on failure
10670 **/
10671static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10672{
Catherine Sullivane8278452015-02-06 08:52:08 +000010673 struct i40e_aq_get_phy_abilities_resp abilities;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010674 struct i40e_pf *pf;
10675 struct i40e_hw *hw;
Anjali Singhai Jain93cd7652013-11-20 10:03:01 +000010676 static u16 pfs_found;
Shannon Nelson1d5109d2015-08-26 15:14:08 -040010677 u16 wol_nvm_bits;
Catherine Sullivand4dfb812013-11-28 06:39:21 +000010678 u16 link_status;
Jean Sacren6f66a482015-09-19 05:08:45 -060010679 int err;
Anjali Singhai Jain4f2f017c2015-10-21 19:47:07 -040010680 u32 val;
Shannon Nelson8a9eb7d2014-03-14 07:32:28 +000010681 u32 i;
Helin Zhang58fc3262015-10-01 14:37:38 -040010682 u8 set_fc_aq_fail;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010683
10684 err = pci_enable_device_mem(pdev);
10685 if (err)
10686 return err;
10687
10688 /* set up for high or low dma */
Mitch Williams64942942014-02-11 08:26:33 +000010689 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Mitch Williams64942942014-02-11 08:26:33 +000010690 if (err) {
Jean Sacrene3e3bfd2014-03-25 04:30:27 +000010691 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10692 if (err) {
10693 dev_err(&pdev->dev,
10694 "DMA configuration failed: 0x%x\n", err);
10695 goto err_dma;
10696 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010697 }
10698
10699 /* set up pci connections */
Johannes Thumshirn56d766d2016-06-07 09:44:05 +020010700 err = pci_request_mem_regions(pdev, i40e_driver_name);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010701 if (err) {
10702 dev_info(&pdev->dev,
10703 "pci_request_selected_regions failed %d\n", err);
10704 goto err_pci_reg;
10705 }
10706
10707 pci_enable_pcie_error_reporting(pdev);
10708 pci_set_master(pdev);
10709
10710 /* Now that we have a PCI connection, we need to do the
10711 * low level device setup. This is primarily setting up
10712 * the Admin Queue structures and then querying for the
10713 * device's current profile information.
10714 */
10715 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
10716 if (!pf) {
10717 err = -ENOMEM;
10718 goto err_pf_alloc;
10719 }
10720 pf->next_vsi = 0;
10721 pf->pdev = pdev;
10722 set_bit(__I40E_DOWN, &pf->state);
10723
10724 hw = &pf->hw;
10725 hw->back = pf;
Anjali Singhai232f4702015-02-26 16:15:39 +000010726
Shannon Nelson2ac8b672015-07-23 16:54:37 -040010727 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
10728 I40E_MAX_CSR_SPACE);
Anjali Singhai232f4702015-02-26 16:15:39 +000010729
Shannon Nelson2ac8b672015-07-23 16:54:37 -040010730 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010731 if (!hw->hw_addr) {
10732 err = -EIO;
10733 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
10734 (unsigned int)pci_resource_start(pdev, 0),
Shannon Nelson2ac8b672015-07-23 16:54:37 -040010735 pf->ioremap_len, err);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010736 goto err_ioremap;
10737 }
10738 hw->vendor_id = pdev->vendor;
10739 hw->device_id = pdev->device;
10740 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
10741 hw->subsystem_vendor_id = pdev->subsystem_vendor;
10742 hw->subsystem_device_id = pdev->subsystem_device;
10743 hw->bus.device = PCI_SLOT(pdev->devfn);
10744 hw->bus.func = PCI_FUNC(pdev->devfn);
Anjali Singhai Jain93cd7652013-11-20 10:03:01 +000010745 pf->instance = pfs_found;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010746
Shannon Nelsonde03d2b2016-03-10 14:59:44 -080010747 /* set up the locks for the AQ, do this only once in probe
10748 * and destroy them only once in remove
10749 */
10750 mutex_init(&hw->aq.asq_mutex);
10751 mutex_init(&hw->aq.arq_mutex);
10752
Alexander Duyck5d4ca232016-09-30 08:21:46 -040010753 pf->msg_enable = netif_msg_init(debug,
10754 NETIF_MSG_DRV |
10755 NETIF_MSG_PROBE |
10756 NETIF_MSG_LINK);
10757 if (debug < -1)
10758 pf->hw.debug_mask = debug;
Shannon Nelson5b5faa42014-10-17 03:14:51 +000010759
Jesse Brandeburg7134f9c2013-11-26 08:56:05 +000010760 /* do a special CORER for clearing PXE mode once at init */
10761 if (hw->revision_id == 0 &&
10762 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
10763 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
10764 i40e_flush(hw);
10765 msleep(200);
10766 pf->corer_count++;
10767
10768 i40e_clear_pxe_mode(hw);
10769 }
10770
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010771 /* Reset here to make sure all is clean and to define PF 'n' */
Shannon Nelson838d41d2014-06-04 20:41:27 +000010772 i40e_clear_hw(hw);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010773 err = i40e_pf_reset(hw);
10774 if (err) {
10775 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
10776 goto err_pf_reset;
10777 }
10778 pf->pfr_count++;
10779
10780 hw->aq.num_arq_entries = I40E_AQ_LEN;
10781 hw->aq.num_asq_entries = I40E_AQ_LEN;
10782 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10783 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10784 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
Carolyn Wybornyb2008cb2014-11-11 20:05:26 +000010785
Carolyn Wybornyb294ac72014-12-11 07:06:39 +000010786 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
Carolyn Wybornyb2008cb2014-11-11 20:05:26 +000010787 "%s-%s:misc",
10788 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010789
10790 err = i40e_init_shared_code(hw);
10791 if (err) {
Anjali Singhai Jainb2a75c52015-04-27 14:57:20 -040010792 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
10793 err);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010794 goto err_pf_reset;
10795 }
10796
Jesse Brandeburgd52c20b2013-11-26 10:49:15 +000010797 /* set up a default setting for link flow control */
10798 pf->hw.fc.requested_mode = I40E_FC_NONE;
10799
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010800 err = i40e_init_adminq(hw);
Carolyn Wyborny2b2426a762015-10-26 19:44:35 -040010801 if (err) {
10802 if (err == I40E_ERR_FIRMWARE_API_VERSION)
10803 dev_info(&pdev->dev,
10804 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
10805 else
10806 dev_info(&pdev->dev,
10807 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
10808
10809 goto err_pf_reset;
10810 }
Carolyn Wybornyf0b44442015-08-31 19:54:49 -040010811
Shannon Nelson6dec1012015-09-28 14:12:30 -040010812 /* provide nvm, fw, api versions */
10813 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
10814 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
10815 hw->aq.api_maj_ver, hw->aq.api_min_ver,
10816 i40e_nvm_version_str(hw));
Carolyn Wybornyf0b44442015-08-31 19:54:49 -040010817
Catherine Sullivan7aa67612014-07-09 07:46:17 +000010818 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
10819 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
Shannon Nelson278b6f62014-06-04 01:41:03 +000010820 dev_info(&pdev->dev,
Catherine Sullivan7aa67612014-07-09 07:46:17 +000010821 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
10822 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
10823 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
Shannon Nelson278b6f62014-06-04 01:41:03 +000010824 dev_info(&pdev->dev,
Catherine Sullivan7aa67612014-07-09 07:46:17 +000010825 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
Shannon Nelson278b6f62014-06-04 01:41:03 +000010826
Shannon Nelson4eb3f762014-03-06 08:59:58 +000010827 i40e_verify_eeprom(pf);
10828
Jesse Brandeburg2c5fe332014-04-23 04:49:57 +000010829 /* Rev 0 hardware was never productized */
10830 if (hw->revision_id < 1)
10831 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
10832
Shannon Nelson6ff4ef82013-12-21 05:44:49 +000010833 i40e_clear_pxe_mode(hw);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010834 err = i40e_get_capabilities(pf);
10835 if (err)
10836 goto err_adminq_setup;
10837
10838 err = i40e_sw_init(pf);
10839 if (err) {
10840 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
10841 goto err_sw_init;
10842 }
10843
10844 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10845 hw->func_caps.num_rx_qp,
10846 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
10847 if (err) {
10848 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
10849 goto err_init_lan_hmc;
10850 }
10851
10852 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10853 if (err) {
10854 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
10855 err = -ENOENT;
10856 goto err_configure_lan_hmc;
10857 }
10858
Neerav Parikhb686ece2014-12-14 01:55:11 +000010859 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
10860 * Ignore error return codes because if it was already disabled via
10861 * hardware settings this will fail
10862 */
Neerav Parikhf1bbad32016-01-13 16:51:39 -080010863 if (pf->flags & I40E_FLAG_STOP_FW_LLDP) {
Neerav Parikhb686ece2014-12-14 01:55:11 +000010864 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
10865 i40e_aq_stop_lldp(hw, true, NULL);
10866 }
10867
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010868 i40e_get_mac_addr(hw, hw->mac.addr);
Sowmini Varadhanb499ffb2015-12-07 15:06:34 -050010869 /* allow a platform config to override the HW addr */
10870 i40e_get_platform_mac_addr(pdev, pf);
Jesse Brandeburgf62b5062013-11-28 06:39:27 +000010871 if (!is_valid_ether_addr(hw->mac.addr)) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010872 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
10873 err = -EIO;
10874 goto err_mac_addr;
10875 }
10876 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
Greg Rose9a173902014-05-22 06:32:02 +000010877 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
Neerav Parikh1f224ad2014-02-12 01:45:31 +000010878 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
10879 if (is_valid_ether_addr(hw->mac.port_addr))
10880 pf->flags |= I40E_FLAG_PORT_ID_VALID;
Vasu Dev38e00432014-08-01 13:27:03 -070010881#ifdef I40E_FCOE
10882 err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
10883 if (err)
10884 dev_info(&pdev->dev,
10885 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
10886 if (!is_valid_ether_addr(hw->mac.san_addr)) {
10887 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
10888 hw->mac.san_addr);
10889 ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
10890 }
10891 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
10892#endif /* I40E_FCOE */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010893
10894 pci_set_drvdata(pdev, pf);
10895 pci_save_state(pdev);
Neerav Parikh4e3b35b2014-01-17 15:36:37 -080010896#ifdef CONFIG_I40E_DCB
10897 err = i40e_init_pf_dcb(pf);
10898 if (err) {
Shannon Nelsonaebfc812014-12-11 07:06:38 +000010899 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
David Ertmanc17ef432016-09-30 01:36:21 -070010900 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
Neerav Parikh014269f2014-04-01 07:11:48 +000010901 /* Continue without DCB enabled */
Neerav Parikh4e3b35b2014-01-17 15:36:37 -080010902 }
10903#endif /* CONFIG_I40E_DCB */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010904
10905 /* set up periodic task facility */
10906 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
10907 pf->service_timer_period = HZ;
10908
10909 INIT_WORK(&pf->service_task, i40e_service_task);
10910 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
10911 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010912
Shannon Nelson1d5109d2015-08-26 15:14:08 -040010913 /* NVM bit on means WoL disabled for the port */
10914 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -080010915 if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1)
Shannon Nelson1d5109d2015-08-26 15:14:08 -040010916 pf->wol_en = false;
10917 else
10918 pf->wol_en = true;
Shannon Nelson8e2773a2013-11-28 06:39:22 +000010919 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
10920
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010921 /* set up the main switch operations */
10922 i40e_determine_queue_usage(pf);
Jesse Brandeburgc11472802015-04-07 19:45:39 -040010923 err = i40e_init_interrupt_scheme(pf);
10924 if (err)
10925 goto err_switch_setup;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010926
Mitch Williams505682c2014-05-20 08:01:37 +000010927 /* The number of VSIs reported by the FW is the minimum guaranteed
10928 * to us; HW supports far more and we share the remaining pool with
10929 * the other PFs. We allocate space for more than the guarantee with
10930 * the understanding that we might not get them all later.
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010931 */
Mitch Williams505682c2014-05-20 08:01:37 +000010932 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
10933 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
10934 else
10935 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
10936
10937 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
Jesse Brandeburgd17038d2015-12-23 12:05:55 -080010938 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
10939 GFP_KERNEL);
Wei Yongjuned87ac02013-09-24 05:17:25 +000010940 if (!pf->vsi) {
10941 err = -ENOMEM;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010942 goto err_switch_setup;
Wei Yongjuned87ac02013-09-24 05:17:25 +000010943 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010944
Anjali Singhai Jainfa11cb32015-05-27 12:06:14 -040010945#ifdef CONFIG_PCI_IOV
10946 /* prep for VF support */
10947 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10948 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10949 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
10950 if (pci_num_vf(pdev))
10951 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
10952 }
10953#endif
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +000010954 err = i40e_setup_pf_switch(pf, false);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010955 if (err) {
10956 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
10957 goto err_vsis;
10958 }
Helin Zhang58fc3262015-10-01 14:37:38 -040010959
10960 /* Make sure flow control is set according to current settings */
10961 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
10962 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
10963 dev_dbg(&pf->pdev->dev,
10964 "Set fc with err %s aq_err %s on get_phy_cap\n",
10965 i40e_stat_str(hw, err),
10966 i40e_aq_str(hw, hw->aq.asq_last_status));
10967 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
10968 dev_dbg(&pf->pdev->dev,
10969 "Set fc with err %s aq_err %s on set_phy_config\n",
10970 i40e_stat_str(hw, err),
10971 i40e_aq_str(hw, hw->aq.asq_last_status));
10972 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
10973 dev_dbg(&pf->pdev->dev,
10974 "Set fc with err %s aq_err %s on get_link_info\n",
10975 i40e_stat_str(hw, err),
10976 i40e_aq_str(hw, hw->aq.asq_last_status));
10977
Shannon Nelson8a9eb7d2014-03-14 07:32:28 +000010978 /* if FDIR VSI was set up, start it now */
Mitch Williams505682c2014-05-20 08:01:37 +000010979 for (i = 0; i < pf->num_alloc_vsi; i++) {
Shannon Nelson8a9eb7d2014-03-14 07:32:28 +000010980 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
10981 i40e_vsi_open(pf->vsi[i]);
10982 break;
10983 }
10984 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010985
Shannon Nelson2f0aff42016-01-04 10:33:08 -080010986 /* The driver only wants link up/down and module qualification
10987 * reports from firmware. Note the negative logic.
Jesse Brandeburg7e2453f2014-09-13 07:40:41 +000010988 */
10989 err = i40e_aq_set_phy_int_mask(&pf->hw,
Shannon Nelson2f0aff42016-01-04 10:33:08 -080010990 ~(I40E_AQ_EVENT_LINK_UPDOWN |
Shannon Nelson867a79e2016-03-18 12:18:15 -070010991 I40E_AQ_EVENT_MEDIA_NA |
Shannon Nelson2f0aff42016-01-04 10:33:08 -080010992 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
Jesse Brandeburg7e2453f2014-09-13 07:40:41 +000010993 if (err)
Shannon Nelsonf1c7e722015-06-04 16:24:01 -040010994 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10995 i40e_stat_str(&pf->hw, err),
10996 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Jesse Brandeburg7e2453f2014-09-13 07:40:41 +000010997
Anjali Singhai Jain4f2f017c2015-10-21 19:47:07 -040010998 /* Reconfigure hardware for allowing smaller MSS in the case
10999 * of TSO, so that we avoid the MDD being fired and causing
11000 * a reset in the case of small MSS+TSO.
11001 */
11002 val = rd32(hw, I40E_REG_MSS);
11003 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
11004 val &= ~I40E_REG_MSS_MIN_MASK;
11005 val |= I40E_64BYTE_MSS;
11006 wr32(hw, I40E_REG_MSS, val);
11007 }
11008
Anjali Singhai Jain8eed76f2015-12-09 15:50:31 -080011009 if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
Anjali Singhai Jain025b4a52015-02-24 06:58:46 +000011010 msleep(75);
11011 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
11012 if (err)
Shannon Nelsonf1c7e722015-06-04 16:24:01 -040011013 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
11014 i40e_stat_str(&pf->hw, err),
11015 i40e_aq_str(&pf->hw,
11016 pf->hw.aq.asq_last_status));
Anjali Singhai Jaincafa2ee2014-09-13 07:40:45 +000011017 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011018 /* The main driver is (mostly) up and happy. We need to set this state
11019 * before setting up the misc vector or we get a race and the vector
11020 * ends up disabled forever.
11021 */
11022 clear_bit(__I40E_DOWN, &pf->state);
11023
11024 /* In case of MSIX we are going to setup the misc vector right here
11025 * to handle admin queue events etc. In case of legacy and MSI
11026 * the misc functionality and queue processing is combined in
11027 * the same vector and that gets setup at open.
11028 */
11029 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11030 err = i40e_setup_misc_vector(pf);
11031 if (err) {
11032 dev_info(&pdev->dev,
11033 "setup of misc vector failed: %d\n", err);
11034 goto err_vsis;
11035 }
11036 }
11037
Greg Rosedf805f62014-04-04 04:43:16 +000011038#ifdef CONFIG_PCI_IOV
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011039 /* prep for VF support */
11040 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
Shannon Nelson4eb3f762014-03-06 08:59:58 +000011041 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11042 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011043 /* disable link interrupts for VFs */
11044 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
11045 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
11046 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
11047 i40e_flush(hw);
Mitch Williams4aeec012014-02-13 03:48:47 -080011048
11049 if (pci_num_vf(pdev)) {
11050 dev_info(&pdev->dev,
11051 "Active VFs found, allocating resources.\n");
11052 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
11053 if (err)
11054 dev_info(&pdev->dev,
11055 "Error %d allocating resources for existing VFs\n",
11056 err);
11057 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011058 }
Greg Rosedf805f62014-04-04 04:43:16 +000011059#endif /* CONFIG_PCI_IOV */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011060
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -060011061 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11062 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
11063 pf->num_iwarp_msix,
11064 I40E_IWARP_IRQ_PILE_ID);
11065 if (pf->iwarp_base_vector < 0) {
11066 dev_info(&pdev->dev,
11067 "failed to get tracking for %d vectors for IWARP err=%d\n",
11068 pf->num_iwarp_msix, pf->iwarp_base_vector);
11069 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11070 }
11071 }
Anjali Singhai Jain93cd7652013-11-20 10:03:01 +000011072
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011073 i40e_dbg_pf_init(pf);
11074
11075 /* tell the firmware that we're starting */
Jesse Brandeburg44033fa2014-04-23 04:50:15 +000011076 i40e_send_version(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011077
11078 /* since everything's happy, start the service_task timer */
11079 mod_timer(&pf->service_timer,
11080 round_jiffies(jiffies + pf->service_timer_period));
11081
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -060011082 /* add this PF to client device list and launch a client service task */
11083 err = i40e_lan_add_device(pf);
11084 if (err)
11085 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
11086 err);
11087
Vasu Dev38e00432014-08-01 13:27:03 -070011088#ifdef I40E_FCOE
11089 /* create FCoE interface */
11090 i40e_fcoe_vsi_setup(pf);
11091
11092#endif
Anjali Singhai Jain3fced532015-09-03 17:18:59 -040011093#define PCI_SPEED_SIZE 8
11094#define PCI_WIDTH_SIZE 8
11095 /* Devices on the IOSF bus do not have this information
11096 * and will report PCI Gen 1 x 1 by default so don't bother
11097 * checking them.
11098 */
11099 if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
11100 char speed[PCI_SPEED_SIZE] = "Unknown";
11101 char width[PCI_WIDTH_SIZE] = "Unknown";
Catherine Sullivand4dfb812013-11-28 06:39:21 +000011102
Anjali Singhai Jain3fced532015-09-03 17:18:59 -040011103 /* Get the negotiated link width and speed from PCI config
11104 * space
11105 */
11106 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
11107 &link_status);
Catherine Sullivand4dfb812013-11-28 06:39:21 +000011108
Anjali Singhai Jain3fced532015-09-03 17:18:59 -040011109 i40e_set_pci_config_data(hw, link_status);
Catherine Sullivand4dfb812013-11-28 06:39:21 +000011110
Anjali Singhai Jain3fced532015-09-03 17:18:59 -040011111 switch (hw->bus.speed) {
11112 case i40e_bus_speed_8000:
11113 strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
11114 case i40e_bus_speed_5000:
11115 strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
11116 case i40e_bus_speed_2500:
11117 strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
11118 default:
11119 break;
11120 }
11121 switch (hw->bus.width) {
11122 case i40e_bus_width_pcie_x8:
11123 strncpy(width, "8", PCI_WIDTH_SIZE); break;
11124 case i40e_bus_width_pcie_x4:
11125 strncpy(width, "4", PCI_WIDTH_SIZE); break;
11126 case i40e_bus_width_pcie_x2:
11127 strncpy(width, "2", PCI_WIDTH_SIZE); break;
11128 case i40e_bus_width_pcie_x1:
11129 strncpy(width, "1", PCI_WIDTH_SIZE); break;
11130 default:
11131 break;
11132 }
11133
11134 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
11135 speed, width);
11136
11137 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
11138 hw->bus.speed < i40e_bus_speed_8000) {
11139 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
11140 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
11141 }
Catherine Sullivand4dfb812013-11-28 06:39:21 +000011142 }
11143
Catherine Sullivane8278452015-02-06 08:52:08 +000011144 /* get the requested speeds from the fw */
11145 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
11146 if (err)
Neerav Parikh8279e492015-09-03 17:18:50 -040011147 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
11148 i40e_stat_str(&pf->hw, err),
11149 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Catherine Sullivane8278452015-02-06 08:52:08 +000011150 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
11151
Catherine Sullivanfc72dbc2015-09-01 11:36:30 -040011152 /* get the supported phy types from the fw */
11153 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
11154 if (err)
11155 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
11156 i40e_stat_str(&pf->hw, err),
11157 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11158 pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);
11159
Anjali Singhai Jaine7358f52015-10-01 14:37:34 -040011160 /* Add a filter to drop all Flow control frames from any VSI from being
11161 * transmitted. By doing so we stop a malicious VF from sending out
11162 * PAUSE or PFC frames and potentially controlling traffic for other
11163 * PF/VF VSIs.
11164 * The FW can still send Flow control frames if enabled.
11165 */
11166 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
11167 pf->main_vsi_seid);
11168
Carolyn Wyborny31b606d2016-02-17 16:12:12 -080011169 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
11170 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
11171 pf->flags |= I40E_FLAG_HAVE_10GBASET_PHY;
11172
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000011173 /* print a string summarizing features */
11174 i40e_print_features(pf);
11175
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011176 return 0;
11177
11178 /* Unwind what we've done if something failed in the setup */
11179err_vsis:
11180 set_bit(__I40E_DOWN, &pf->state);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011181 i40e_clear_interrupt_scheme(pf);
11182 kfree(pf->vsi);
Shannon Nelson04b03012013-11-28 06:39:34 +000011183err_switch_setup:
11184 i40e_reset_interrupt_capability(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011185 del_timer_sync(&pf->service_timer);
11186err_mac_addr:
11187err_configure_lan_hmc:
11188 (void)i40e_shutdown_lan_hmc(hw);
11189err_init_lan_hmc:
11190 kfree(pf->qp_pile);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011191err_sw_init:
11192err_adminq_setup:
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011193err_pf_reset:
11194 iounmap(hw->hw_addr);
11195err_ioremap:
11196 kfree(pf);
11197err_pf_alloc:
11198 pci_disable_pcie_error_reporting(pdev);
Johannes Thumshirn56d766d2016-06-07 09:44:05 +020011199 pci_release_mem_regions(pdev);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011200err_pci_reg:
11201err_dma:
11202 pci_disable_device(pdev);
11203 return err;
11204}
11205
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that is should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 *
 * Teardown is strictly ordered: stop scheduling, free VFs, release
 * switch elements and VSIs, detach clients, then HMC, then adminq,
 * and only then free software state and PCI resources.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	/* the timer/work may never have been set up if probe failed early,
	 * so only tear them down when they were initialized
	 */
	if (pf->service_timer.data)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);

	/* VFs must go before their parent PF resources are released */
	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	/* remove attached clients */
	ret_code = i40e_lan_del_device(pf);
	if (ret_code) {
		dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
			 ret_code);
	}

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	/* VEBs released above only detached them from the switch;
	 * the memory is freed here
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	/* unmap and free last; hw register access is impossible after this */
	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
11311
11312/**
11313 * i40e_pci_error_detected - warning that something funky happened in PCI land
11314 * @pdev: PCI device information struct
11315 *
11316 * Called to warn that something happened and the error handling steps
11317 * are in progress. Allows the driver to quiesce things, be ready for
11318 * remediation.
11319 **/
11320static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
11321 enum pci_channel_state error)
11322{
11323 struct i40e_pf *pf = pci_get_drvdata(pdev);
11324
11325 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
11326
Guilherme G Piccoliedfc23ee2016-10-03 00:31:12 -070011327 if (!pf) {
11328 dev_info(&pdev->dev,
11329 "Cannot recover - error happened during device probe\n");
11330 return PCI_ERS_RESULT_DISCONNECT;
11331 }
11332
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011333 /* shutdown all operations */
Shannon Nelson9007bcc2013-11-26 10:49:23 +000011334 if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
11335 rtnl_lock();
11336 i40e_prep_for_reset(pf);
11337 rtnl_unlock();
11338 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011339
11340 /* Request a slot reset */
11341 return PCI_ERS_RESULT_NEED_RESET;
11342}
11343
11344/**
11345 * i40e_pci_error_slot_reset - a PCI slot reset just happened
11346 * @pdev: PCI device information struct
11347 *
11348 * Called to find if the driver can work with the device now that
11349 * the pci slot has been reset. If a basic connection seems good
11350 * (registers are readable and have sane content) then return a
11351 * happy little PCI_ERS_RESULT_xxx.
11352 **/
11353static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
11354{
11355 struct i40e_pf *pf = pci_get_drvdata(pdev);
11356 pci_ers_result_t result;
11357 int err;
11358 u32 reg;
11359
Shannon Nelsonfb43201f2015-08-26 15:14:17 -040011360 dev_dbg(&pdev->dev, "%s\n", __func__);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011361 if (pci_enable_device_mem(pdev)) {
11362 dev_info(&pdev->dev,
11363 "Cannot re-enable PCI device after reset.\n");
11364 result = PCI_ERS_RESULT_DISCONNECT;
11365 } else {
11366 pci_set_master(pdev);
11367 pci_restore_state(pdev);
11368 pci_save_state(pdev);
11369 pci_wake_from_d3(pdev, false);
11370
11371 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
11372 if (reg == 0)
11373 result = PCI_ERS_RESULT_RECOVERED;
11374 else
11375 result = PCI_ERS_RESULT_DISCONNECT;
11376 }
11377
11378 err = pci_cleanup_aer_uncorrect_error_status(pdev);
11379 if (err) {
11380 dev_info(&pdev->dev,
11381 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
11382 err);
11383 /* non-fatal, continue */
11384 }
11385
11386 return result;
11387}
11388
11389/**
11390 * i40e_pci_error_resume - restart operations after PCI error recovery
11391 * @pdev: PCI device information struct
11392 *
11393 * Called to allow the driver to bring things back up after PCI error
11394 * and/or reset recovery has finished.
11395 **/
11396static void i40e_pci_error_resume(struct pci_dev *pdev)
11397{
11398 struct i40e_pf *pf = pci_get_drvdata(pdev);
11399
Shannon Nelsonfb43201f2015-08-26 15:14:17 -040011400 dev_dbg(&pdev->dev, "%s\n", __func__);
Shannon Nelson9007bcc2013-11-26 10:49:23 +000011401 if (test_bit(__I40E_SUSPENDED, &pf->state))
11402 return;
11403
11404 rtnl_lock();
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011405 i40e_handle_reset_warning(pf);
Vasily Averin4c4935a2015-07-08 15:04:26 +030011406 rtnl_unlock();
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011407}
11408
/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	/* mark the PF down/suspended first so no new work gets scheduled */
	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	/* arm (or disarm) APM/magic-packet wakeup per the WoL setting */
	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_fdir_teardown(pf);

	/* NOTE(review): the prep-for-reset and WoL register writes are
	 * repeated below, presumably because the service task could have
	 * run (and undone them) before being cancelled above -- confirm
	 * against git history before simplifying.
	 */
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_clear_interrupt_scheme(pf);

	/* only drop to D3hot when the system is actually powering off;
	 * kexec/reboot paths must leave the device in D0
	 */
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
11447
11448#ifdef CONFIG_PM
/**
 * i40e_suspend - PCI callback for moving to D3
 * @pdev: PCI device information struct
 * @state: target power state (unused; D3hot is always entered)
 *
 * Returns 0 on success, or the error from pci_save_state().
 **/
static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	int retval = 0;

	/* block the service task and any resets while suspended */
	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);

	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	/* program Wake-on-LAN behavior per the user's configuration */
	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* quiesce the misc/admin interrupt before going down */
	i40e_stop_misc_vector(pf);

	/* save config space before entering D3 so resume can restore it */
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	pci_wake_from_d3(pdev, pf->wol_en);
	pci_set_power_state(pdev, PCI_D3hot);

	return retval;
}
11480
11481/**
11482 * i40e_resume - PCI callback for waking up from D3
11483 * @pdev: PCI device information struct
11484 **/
11485static int i40e_resume(struct pci_dev *pdev)
11486{
11487 struct i40e_pf *pf = pci_get_drvdata(pdev);
11488 u32 err;
11489
11490 pci_set_power_state(pdev, PCI_D0);
11491 pci_restore_state(pdev);
11492 /* pci_restore_state() clears dev->state_saves, so
11493 * call pci_save_state() again to restore it.
11494 */
11495 pci_save_state(pdev);
11496
11497 err = pci_enable_device_mem(pdev);
11498 if (err) {
Shannon Nelsonfb43201f2015-08-26 15:14:17 -040011499 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
Shannon Nelson9007bcc2013-11-26 10:49:23 +000011500 return err;
11501 }
11502 pci_set_master(pdev);
11503
11504 /* no wakeup events while running */
11505 pci_wake_from_d3(pdev, false);
11506
11507 /* handling the reset will rebuild the device state */
11508 if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
11509 clear_bit(__I40E_DOWN, &pf->state);
11510 rtnl_lock();
11511 i40e_reset_and_rebuild(pf, false);
11512 rtnl_unlock();
11513 }
11514
11515 return 0;
11516}
11517
11518#endif
/* PCI AER (Advanced Error Reporting) recovery hooks: quiesce on error,
 * validate after slot reset, rebuild on resume.
 */
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};
11524
/* PCI driver registration table: probe/remove lifecycle, legacy PM
 * callbacks (when CONFIG_PM is set), shutdown/AER handling, and
 * SR-IOV VF configuration via sysfs.
 */
static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
#ifdef CONFIG_PM
	.suspend = i40e_suspend,
	.resume = i40e_resume,
#endif
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
11538
11539/**
11540 * i40e_init_module - Driver registration routine
11541 *
11542 * i40e_init_module is the first routine called when the driver is
11543 * loaded. All it does is register with the PCI subsystem.
11544 **/
11545static int __init i40e_init_module(void)
11546{
11547 pr_info("%s: %s - version %s\n", i40e_driver_name,
11548 i40e_driver_string, i40e_driver_version_str);
11549 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
Greg Rose96664482015-02-06 08:52:13 +000011550
Jesse Brandeburg2803b162015-12-22 14:25:08 -080011551 /* we will see if single thread per module is enough for now,
11552 * it can't be any worse than using the system workqueue which
11553 * was already single threaded
11554 */
Jacob Keller6992a6c2016-08-04 11:37:01 -070011555 i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
11556 i40e_driver_name);
Jesse Brandeburg2803b162015-12-22 14:25:08 -080011557 if (!i40e_wq) {
11558 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
11559 return -ENOMEM;
11560 }
11561
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011562 i40e_dbg_init();
11563 return pci_register_driver(&i40e_driver);
11564}
11565module_init(i40e_init_module);
11566
/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	/* unregister first so no new work can be queued on i40e_wq
	 * before it is destroyed
	 */
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);