/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
#define DRV_VERSION_BUILD 21
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}

/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

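	/* escalate the recovery action on each successive timeout:
	 * a PF reset first, then a core reset, then a global reset
	 */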
	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

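		/* read a consistent snapshot of the ring's 64-bit counters;
		 * the fetch/retry pair rereads if the writer updated the
		 * stats while we were looking at them
		 */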
		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
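		/* the Rx ring lives in the same allocation, immediately
		 * after its paired Tx ring
		 */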
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_dropped = vsi_stats->rx_dropped;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
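	/* the hardware counter is only 48 bits wide, so mask the result
	 * back down after handling the wrap
	 */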
	*stat &= 0xFFFFFFFFFFFFULL;
}

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;     /* device's eth stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	u64 tx_lost_interrupt;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	tx_lost_interrupt = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;
		tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->tx_lost_interrupt = tx_lost_interrupt;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
	i40e_update_fcoe_stats(vsi);
#endif
}

1143/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001144 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1145 * @vsi: the VSI to be searched
1146 * @macaddr: the MAC address
1147 * @vlan: the vlan
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001148 *
1149 * Returns ptr to the filter object or NULL
1150 **/
1151static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
Jacob Keller6622f5c2016-10-05 09:30:32 -07001152 const u8 *macaddr, s16 vlan)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001153{
1154 struct i40e_mac_filter *f;
1155
1156 if (!vsi || !macaddr)
1157 return NULL;
1158
1159 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1160 if ((ether_addr_equal(macaddr, f->macaddr)) &&
Jacob Keller1bc87e82016-10-05 09:30:31 -07001161 (vlan == f->vlan))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001162 return f;
1163 }
1164 return NULL;
1165}
1166
1167/**
1168 * i40e_find_mac - Find a mac addr in the macvlan filters list
1169 * @vsi: the VSI to be searched
1170 * @macaddr: the MAC address we are searching for
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001171 *
1172 * Returns the first filter with the provided MAC address or NULL if
1173 * MAC address was not found
1174 **/
Jacob Keller6622f5c2016-10-05 09:30:32 -07001175struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001176{
1177 struct i40e_mac_filter *f;
1178
1179 if (!vsi || !macaddr)
1180 return NULL;
1181
1182 list_for_each_entry(f, &vsi->mac_filter_list, list) {
Jacob Keller1bc87e82016-10-05 09:30:31 -07001183 if ((ether_addr_equal(macaddr, f->macaddr)))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001184 return f;
1185 }
1186 return NULL;
1187}
1188
1189/**
1190 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1191 * @vsi: the VSI to be searched
1192 *
1193 * Returns true if VSI is in vlan mode or false otherwise
1194 **/
1195bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1196{
1197 struct i40e_mac_filter *f;
1198
1199 /* Only -1 for all the filters denotes not in vlan mode
1200 * so we have to go through all the list in order to make sure
1201 */
1202 list_for_each_entry(f, &vsi->mac_filter_list, list) {
Greg Rosed9b68f82015-07-23 16:54:31 -04001203 if (f->vlan >= 0 || vsi->info.pvid)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001204 return true;
1205 }
1206
1207 return false;
1208}
1209
1210/**
1211 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
1212 * @vsi: the VSI to be searched
1213 * @macaddr: the mac address to be filtered
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001214 *
1215 * Goes through all the macvlan filters and adds a
1216 * macvlan filter for each unique vlan that already exists
1217 *
1218 * Returns first filter found on success, else NULL
1219 **/
Jacob Keller6622f5c2016-10-05 09:30:32 -07001220struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
1221 const u8 *macaddr)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001222{
1223 struct i40e_mac_filter *f;
1224
1225 list_for_each_entry(f, &vsi->mac_filter_list, list) {
Mitch Williamsecbb44e2015-07-10 19:35:56 -04001226 if (vsi->info.pvid)
1227 f->vlan = le16_to_cpu(vsi->info.pvid);
Jacob Keller1bc87e82016-10-05 09:30:31 -07001228 if (!i40e_find_filter(vsi, macaddr, f->vlan)) {
1229 if (!i40e_add_filter(vsi, macaddr, f->vlan))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001230 return NULL;
1231 }
1232 }
1233
1234 return list_first_entry_or_null(&vsi->mac_filter_list,
1235 struct i40e_mac_filter, list);
1236}
1237
1238/**
Mitch Williamsb36e9ab2015-11-19 11:34:16 -08001239 * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
1240 * @vsi: the VSI to be searched
1241 * @macaddr: the mac address to be removed
Mitch Williamsb36e9ab2015-11-19 11:34:16 -08001242 *
1243 * Removes a given MAC address from a VSI, regardless of VLAN
1244 *
1245 * Returns 0 for success, or error
1246 **/
Jacob Keller6622f5c2016-10-05 09:30:32 -07001247int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr)
Mitch Williamsb36e9ab2015-11-19 11:34:16 -08001248{
1249 struct i40e_mac_filter *f = NULL;
1250 int changed = 0;
1251
1252 WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
1253 "Missing mac_filter_list_lock\n");
1254 list_for_each_entry(f, &vsi->mac_filter_list, list) {
Jacob Keller1bc87e82016-10-05 09:30:31 -07001255 if ((ether_addr_equal(macaddr, f->macaddr))) {
1256 f->state = I40E_FILTER_REMOVE;
Mitch Williamsb36e9ab2015-11-19 11:34:16 -08001257 }
1258 }
1259 if (changed) {
1260 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1261 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1262 return 0;
1263 }
1264 return -ENOENT;
1265}
1266
1267/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001268 * i40e_add_filter - Add a mac/vlan filter to the VSI
1269 * @vsi: the VSI to be searched
1270 * @macaddr: the MAC address
1271 * @vlan: the vlan
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001272 *
1273 * Returns ptr to the filter object or NULL when no memory available.
Kiran Patil21659032015-09-30 14:09:03 -04001274 *
1275 * NOTE: This function is expected to be called with mac_filter_list_lock
1276 * being held.
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001277 **/
1278struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
Jacob Keller6622f5c2016-10-05 09:30:32 -07001279 const u8 *macaddr, s16 vlan)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001280{
1281 struct i40e_mac_filter *f;
1282
1283 if (!vsi || !macaddr)
1284 return NULL;
1285
Kiran Patilf6bd0962016-06-20 09:10:34 -07001286 /* Do not allow broadcast filter to be added since broadcast filter
1287 * is added as part of add VSI for any newly created VSI except
1288 * FDIR VSI
1289 */
1290 if (is_broadcast_ether_addr(macaddr))
1291 return NULL;
1292
Jacob Keller1bc87e82016-10-05 09:30:31 -07001293 f = i40e_find_filter(vsi, macaddr, vlan);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001294 if (!f) {
1295 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1296 if (!f)
Jacob Keller1bc87e82016-10-05 09:30:31 -07001297 return NULL;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001298
Greg Rose9a173902014-05-22 06:32:02 +00001299 ether_addr_copy(f->macaddr, macaddr);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001300 f->vlan = vlan;
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001301 /* If we're in overflow promisc mode, set the state directly
1302 * to failed, so we don't bother to try sending the filter
1303 * to the hardware.
1304 */
1305 if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))
1306 f->state = I40E_FILTER_FAILED;
1307 else
1308 f->state = I40E_FILTER_NEW;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001309 INIT_LIST_HEAD(&f->list);
Kiran Patil04d5a212015-12-09 15:50:23 -08001310 list_add_tail(&f->list, &vsi->mac_filter_list);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001311
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001312 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1313 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1314 }
1315
Jacob Keller1bc87e82016-10-05 09:30:31 -07001316 /* If we're asked to add a filter that has been marked for removal, it
1317 * is safe to simply restore it to active state. __i40e_del_filter
1318 * will have simply deleted any filters which were previously marked
1319 * NEW or FAILED, so if it is currently marked REMOVE it must have
1320 * previously been ACTIVE. Since we haven't yet run the sync filters
1321 * task, just restore this filter to the ACTIVE state so that the
1322 * sync task leaves it in place
1323 */
1324 if (f->state == I40E_FILTER_REMOVE)
1325 f->state = I40E_FILTER_ACTIVE;
1326
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001327 return f;
1328}
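
/* Filter state lifecycle sketch (informational, derived from the code above
 * and from i40e_sync_vsi_filters() below):
 *
 *	i40e_add_filter()    -> I40E_FILTER_NEW (or FAILED when already in
 *	                        overflow promiscuous mode)
 *	sync task, AQ ok     -> I40E_FILTER_ACTIVE
 *	sync task, AQ ENOSPC -> I40E_FILTER_FAILED
 *	i40e_del_filter()    -> NEW/FAILED entries are freed immediately,
 *	                        ACTIVE entries become I40E_FILTER_REMOVE
 *	                        until the sync task deletes them in firmware
 */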
1329
1330/**
1331 * i40e_del_filter - Remove a mac/vlan filter from the VSI
1332 * @vsi: the VSI to be searched
1333 * @macaddr: the MAC address
1334 * @vlan: the vlan
Kiran Patil21659032015-09-30 14:09:03 -04001335 *
1336 * NOTE: This function is expected to be called with mac_filter_list_lock
1337 * being held.
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001338 * ANOTHER NOTE: Because this function may free the filter entry, callers
 1339 * that iterate the list while calling it MUST use the "safe" iterator
 1340 * variants, e.g. list_for_each_entry_safe() instead of
 * list_for_each_entry().
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001341 **/
Jacob Keller6622f5c2016-10-05 09:30:32 -07001342void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001343{
1344 struct i40e_mac_filter *f;
1345
1346 if (!vsi || !macaddr)
1347 return;
1348
Jacob Keller1bc87e82016-10-05 09:30:31 -07001349 f = i40e_find_filter(vsi, macaddr, vlan);
1350 if (!f)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001351 return;
1352
Jacob Keller1bc87e82016-10-05 09:30:31 -07001353 if ((f->state == I40E_FILTER_FAILED) ||
1354 (f->state == I40E_FILTER_NEW)) {
1355 /* this one never got added by the FW. Just remove it,
1356 * no need to sync anything.
1357 */
1358 list_del(&f->list);
1359 kfree(f);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001360 } else {
Jacob Keller1bc87e82016-10-05 09:30:31 -07001361 f->state = I40E_FILTER_REMOVE;
1362 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1363 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001364 }
1365}
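
/* Deletion-in-a-loop sketch (illustrative only): because i40e_del_filter()
 * can free the entry, loops that call it must hold mac_filter_list_lock and
 * use the safe iterator, as i40e_vsi_kill_vlan() does below:
 *
 *	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
 *		i40e_del_filter(vsi, f->macaddr, vid);
 */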
1366
1367/**
1368 * i40e_set_mac - NDO callback to set mac address
1369 * @netdev: network interface device structure
1370 * @p: pointer to an address structure
1371 *
1372 * Returns 0 on success, negative on failure
1373 **/
Vasu Dev38e00432014-08-01 13:27:03 -07001374#ifdef I40E_FCOE
1375int i40e_set_mac(struct net_device *netdev, void *p)
1376#else
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001377static int i40e_set_mac(struct net_device *netdev, void *p)
Vasu Dev38e00432014-08-01 13:27:03 -07001378#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001379{
1380 struct i40e_netdev_priv *np = netdev_priv(netdev);
1381 struct i40e_vsi *vsi = np->vsi;
Shannon Nelson30650cc2014-07-29 04:01:50 +00001382 struct i40e_pf *pf = vsi->back;
1383 struct i40e_hw *hw = &pf->hw;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001384 struct sockaddr *addr = p;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001385
1386 if (!is_valid_ether_addr(addr->sa_data))
1387 return -EADDRNOTAVAIL;
1388
Shannon Nelson30650cc2014-07-29 04:01:50 +00001389 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1390 netdev_info(netdev, "already using mac address %pM\n",
1391 addr->sa_data);
1392 return 0;
1393 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001394
Anjali Singhai Jain80f64282013-11-28 06:39:47 +00001395 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1396 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1397 return -EADDRNOTAVAIL;
1398
Shannon Nelson30650cc2014-07-29 04:01:50 +00001399 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1400 netdev_info(netdev, "returning to hw mac address %pM\n",
1401 hw->mac.addr);
1402 else
1403 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1404
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001405 spin_lock_bh(&vsi->mac_filter_list_lock);
Jacob Keller1bc87e82016-10-05 09:30:31 -07001406 i40e_del_mac_all_vlan(vsi, netdev->dev_addr);
1407 i40e_put_mac_in_vlan(vsi, addr->sa_data);
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001408 spin_unlock_bh(&vsi->mac_filter_list_lock);
1409 ether_addr_copy(netdev->dev_addr, addr->sa_data);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001410 if (vsi->type == I40E_VSI_MAIN) {
1411 i40e_status ret;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04001412
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001413 ret = i40e_aq_mac_address_write(&vsi->back->hw,
Shannon Nelsoncc412222014-06-04 01:23:21 +00001414 I40E_AQC_WRITE_TYPE_LAA_WOL,
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001415 addr->sa_data, NULL);
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001416 if (ret)
1417 netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1418 i40e_stat_str(hw, ret),
1419 i40e_aq_str(hw, hw->aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001420 }
1421
Jesse Brandeburgc53934c2016-01-04 10:33:06 -08001422 /* schedule our worker thread which will take care of
1423 * applying the new filter changes
1424 */
1425 i40e_service_event_schedule(vsi->back);
1426 return 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001427}
1428
1429/**
1430 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1431 * @vsi: the VSI being setup
1432 * @ctxt: VSI context structure
1433 * @enabled_tc: Enabled TCs bitmap
1434 * @is_add: True if called before Add VSI
1435 *
1436 * Setup VSI queue mapping for enabled traffic classes.
1437 **/
Vasu Dev38e00432014-08-01 13:27:03 -07001438#ifdef I40E_FCOE
1439void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1440 struct i40e_vsi_context *ctxt,
1441 u8 enabled_tc,
1442 bool is_add)
1443#else
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001444static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1445 struct i40e_vsi_context *ctxt,
1446 u8 enabled_tc,
1447 bool is_add)
Vasu Dev38e00432014-08-01 13:27:03 -07001448#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001449{
1450 struct i40e_pf *pf = vsi->back;
1451 u16 sections = 0;
1452 u8 netdev_tc = 0;
1453 u16 numtc = 0;
1454 u16 qcount;
1455 u8 offset;
1456 u16 qmap;
1457 int i;
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08001458 u16 num_tc_qps = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001459
1460 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1461 offset = 0;
1462
1463 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1464 /* Find numtc from enabled TC bitmap */
1465 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08001466 if (enabled_tc & BIT(i)) /* TC is enabled */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001467 numtc++;
1468 }
1469 if (!numtc) {
1470 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1471 numtc = 1;
1472 }
1473 } else {
1474 /* At least TC0 is enabled in case of non-DCB case */
1475 numtc = 1;
1476 }
1477
1478 vsi->tc_config.numtc = numtc;
1479 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08001480 /* Number of queues per enabled TC */
Catherine Sullivan7d644022016-05-16 10:26:41 -07001481 qcount = vsi->alloc_queue_pairs;
1482
Anjali Singhai7f9ff472015-02-21 06:43:19 +00001483 num_tc_qps = qcount / numtc;
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04001484 num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001485
1486 /* Setup queue offset/count for all TCs for given VSI */
1487 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1488 /* See if the given TC is enabled for the given VSI */
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08001489 if (vsi->tc_config.enabled_tc & BIT(i)) {
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001490 /* TC is enabled */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001491 int pow, num_qps;
1492
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001493 switch (vsi->type) {
1494 case I40E_VSI_MAIN:
Helin Zhangacd65442015-10-26 19:44:28 -04001495 qcount = min_t(int, pf->alloc_rss_size,
1496 num_tc_qps);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001497 break;
Vasu Dev38e00432014-08-01 13:27:03 -07001498#ifdef I40E_FCOE
1499 case I40E_VSI_FCOE:
1500 qcount = num_tc_qps;
1501 break;
1502#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001503 case I40E_VSI_FDIR:
1504 case I40E_VSI_SRIOV:
1505 case I40E_VSI_VMDQ2:
1506 default:
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08001507 qcount = num_tc_qps;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001508 WARN_ON(i != 0);
1509 break;
1510 }
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08001511 vsi->tc_config.tc_info[i].qoffset = offset;
1512 vsi->tc_config.tc_info[i].qcount = qcount;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001513
Shannon Nelson1e200e42015-02-27 09:15:24 +00001514 /* find the next higher power-of-2 of num queue pairs */
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08001515 num_qps = qcount;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001516 pow = 0;
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001517 while (num_qps && (BIT_ULL(pow) < qcount)) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001518 pow++;
1519 num_qps >>= 1;
1520 }
1521
1522 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1523 qmap =
1524 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1525 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1526
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08001527 offset += qcount;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001528 } else {
1529 /* TC is not enabled so set the offset to
1530 * default queue and allocate one queue
1531 * for the given TC.
1532 */
1533 vsi->tc_config.tc_info[i].qoffset = 0;
1534 vsi->tc_config.tc_info[i].qcount = 1;
1535 vsi->tc_config.tc_info[i].netdev_tc = 0;
1536
1537 qmap = 0;
1538 }
1539 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1540 }
1541
1542 /* Set actual Tx/Rx queue pairs */
1543 vsi->num_queue_pairs = offset;
Anjali Singhai Jain9a3bd2f2015-02-24 06:58:44 +00001544 if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
1545 if (vsi->req_queue_pairs > 0)
1546 vsi->num_queue_pairs = vsi->req_queue_pairs;
Anjali Singhai Jain26cdc442015-07-10 19:36:00 -04001547 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
Anjali Singhai Jain9a3bd2f2015-02-24 06:58:44 +00001548 vsi->num_queue_pairs = pf->num_lan_msix;
1549 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001550
1551 /* Scheduler section valid can only be set for ADD VSI */
1552 if (is_add) {
1553 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1554
1555 ctxt->info.up_enable_bits = enabled_tc;
1556 }
1557 if (vsi->type == I40E_VSI_SRIOV) {
1558 ctxt->info.mapping_flags |=
1559 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1560 for (i = 0; i < vsi->num_queue_pairs; i++)
1561 ctxt->info.queue_mapping[i] =
1562 cpu_to_le16(vsi->base_queue + i);
1563 } else {
1564 ctxt->info.mapping_flags |=
1565 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1566 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1567 }
1568 ctxt->info.valid_sections |= cpu_to_le16(sections);
1569}
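
/* Worked example (illustrative): for a TC with qcount = 6 starting at
 * queue offset 0, the loop above yields pow = 3 (2^3 = 8 is the next
 * power of two >= 6), so
 *
 *	qmap = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *
 * and the next enabled TC starts at offset 0 + 6 = 6.
 */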
1570
1571/**
Jacob Keller6622f5c2016-10-05 09:30:32 -07001572 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1573 * @netdev: the netdevice
1574 * @addr: address to add
1575 *
1576 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 1577 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold
 * mac_filter_list_lock.
1578 */
1579static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1580{
1581 struct i40e_netdev_priv *np = netdev_priv(netdev);
1582 struct i40e_vsi *vsi = np->vsi;
1583 struct i40e_mac_filter *f;
1584
1585 if (i40e_is_vsi_in_vlan(vsi))
1586 f = i40e_put_mac_in_vlan(vsi, addr);
1587 else
1588 f = i40e_add_filter(vsi, addr, I40E_VLAN_ANY);
1589
1590 if (f)
1591 return 0;
1592 else
1593 return -ENOMEM;
1594}
1595
1596/**
1597 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1598 * @netdev: the netdevice
 1599 * @addr: address to remove
1600 *
1601 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 1602 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold
 * mac_filter_list_lock.
1603 */
1604static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1605{
1606 struct i40e_netdev_priv *np = netdev_priv(netdev);
1607 struct i40e_vsi *vsi = np->vsi;
1608
1609 if (i40e_is_vsi_in_vlan(vsi))
1610 i40e_del_mac_all_vlan(vsi, addr);
1611 else
1612 i40e_del_filter(vsi, addr, I40E_VLAN_ANY);
1613
1614 return 0;
1615}
1616
1617/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001618 * i40e_set_rx_mode - NDO callback to set the netdev filters
1619 * @netdev: network interface device structure
1620 **/
Vasu Dev38e00432014-08-01 13:27:03 -07001621#ifdef I40E_FCOE
1622void i40e_set_rx_mode(struct net_device *netdev)
1623#else
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001624static void i40e_set_rx_mode(struct net_device *netdev)
Vasu Dev38e00432014-08-01 13:27:03 -07001625#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001626{
1627 struct i40e_netdev_priv *np = netdev_priv(netdev);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001628 struct i40e_vsi *vsi = np->vsi;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001629
Kiran Patil21659032015-09-30 14:09:03 -04001630 spin_lock_bh(&vsi->mac_filter_list_lock);
1631
Jacob Keller6622f5c2016-10-05 09:30:32 -07001632 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1633 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001634
Kiran Patil21659032015-09-30 14:09:03 -04001635 spin_unlock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001636
1637 /* check for other flag changes */
1638 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1639 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1640 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1641 }
Jesse Brandeburgc53934c2016-01-04 10:33:06 -08001642
1643 /* schedule our worker thread which will take care of
1644 * applying the new filter changes
1645 */
1646 i40e_service_event_schedule(vsi->back);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001647}
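
/* Flow sketch (informational): __dev_uc_sync()/__dev_mc_sync() walk the
 * netdev's unicast/multicast address lists and invoke i40e_addr_sync() for
 * every address not yet known and i40e_addr_unsync() for every address that
 * has gone away, all under the mac_filter_list_lock taken above. The
 * resulting NEW/REMOVE filter states are pushed to the hardware later by
 * i40e_sync_vsi_filters() from the service task.
 */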
1648
1649/**
Kiran Patil21659032015-09-30 14:09:03 -04001650 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1651 * @vsi: pointer to vsi struct
1652 * @from: Pointer to list which contains MAC filter entries - changes to
 1653 * those entries need to be undone.
1654 *
1655 * MAC filter entries from list were slated to be removed from device.
1656 **/
1657static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1658 struct list_head *from)
1659{
1660 struct i40e_mac_filter *f, *ftmp;
1661
1662 list_for_each_entry_safe(f, ftmp, from, list) {
Kiran Patil21659032015-09-30 14:09:03 -04001663		/* Move the element back into the MAC filter list */
1664 list_move_tail(&f->list, &vsi->mac_filter_list);
1665 }
1666}
1667
1668/**
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001669 * i40e_update_filter_state - Update filter state based on return data
1670 * from firmware
1671 * @count: Number of filters added
1672 * @add_list: return data from fw
 1673 * @add_head: pointer to first filter in current batch
1674 * @aq_err: status from fw
Kiran Patil21659032015-09-30 14:09:03 -04001675 *
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001676 * MAC filter entries from list were slated to be added to device. Returns
1677 * number of successful filters. Note that 0 does NOT mean success!
Kiran Patil21659032015-09-30 14:09:03 -04001678 **/
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001679static int
1680i40e_update_filter_state(int count,
1681 struct i40e_aqc_add_macvlan_element_data *add_list,
1682 struct i40e_mac_filter *add_head, int aq_err)
Kiran Patil21659032015-09-30 14:09:03 -04001683{
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001684 int retval = 0;
1685 int i;
Kiran Patil21659032015-09-30 14:09:03 -04001686
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001687
1688 if (!aq_err) {
1689 retval = count;
1690 /* Everything's good, mark all filters active. */
 1691		for (i = 0; i < count; i++) {
1692 add_head->state = I40E_FILTER_ACTIVE;
1693 add_head = list_next_entry(add_head, list);
1694 }
1695 } else if (aq_err == I40E_AQ_RC_ENOSPC) {
1696 /* Device ran out of filter space. Check the return value
1697 * for each filter to see which ones are active.
1698 */
 1699		for (i = 0; i < count; i++) {
1700 if (add_list[i].match_method ==
1701 I40E_AQC_MM_ERR_NO_RES) {
1702 add_head->state = I40E_FILTER_FAILED;
1703 } else {
1704 add_head->state = I40E_FILTER_ACTIVE;
1705 retval++;
1706 }
1707 add_head = list_next_entry(add_head, list);
1708 }
1709 } else {
1710 /* Some other horrible thing happened, fail all filters */
1711 retval = 0;
 1712		for (i = 0; i < count; i++) {
1713 add_head->state = I40E_FILTER_FAILED;
1714 add_head = list_next_entry(add_head, list);
1715 }
Kiran Patil21659032015-09-30 14:09:03 -04001716 }
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001717 return retval;
Kiran Patil21659032015-09-30 14:09:03 -04001718}
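
/* Worked example (illustrative): with count = 4 and the firmware returning
 * I40E_AQ_RC_ENOSPC, suppose add_list[0..2] came back clean but
 * add_list[3].match_method == I40E_AQC_MM_ERR_NO_RES. The first three
 * filters are marked I40E_FILTER_ACTIVE, the fourth I40E_FILTER_FAILED,
 * and the function returns 3; the caller then sees fcnt != num_add and
 * forces overflow promiscuous mode.
 */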
1719
1720/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001721 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1722 * @vsi: ptr to the VSI
1723 *
1724 * Push any outstanding VSI filter changes through the AdminQ.
1725 *
1726 * Returns 0 or error value
1727 **/
Jesse Brandeburg17652c62015-11-05 17:01:02 -08001728int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001729{
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001730 struct i40e_mac_filter *f, *ftmp, *add_head = NULL;
1731 struct list_head tmp_add_list, tmp_del_list;
Mitch Williams3e25a8f2016-05-16 10:26:32 -07001732 struct i40e_hw *hw = &vsi->back->hw;
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001733 bool promisc_changed = false;
Shannon Nelson2d1de822016-05-16 10:26:44 -07001734 char vsi_name[16] = "PF";
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001735 int filter_list_len = 0;
1736 u32 changed_flags = 0;
Mitch Williamsea02e902015-11-09 15:35:50 -08001737 i40e_status aq_ret = 0;
Mitch Williamsea02e902015-11-09 15:35:50 -08001738 int retval = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001739 struct i40e_pf *pf;
1740 int num_add = 0;
1741 int num_del = 0;
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04001742 int aq_err = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001743 u16 cmd_flags;
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001744 int list_size;
1745 int fcnt;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001746
1747 /* empty array typed pointers, kcalloc later */
1748 struct i40e_aqc_add_macvlan_element_data *add_list;
1749 struct i40e_aqc_remove_macvlan_element_data *del_list;
1750
1751 while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1752 usleep_range(1000, 2000);
1753 pf = vsi->back;
1754
1755 if (vsi->netdev) {
1756 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1757 vsi->current_netdev_flags = vsi->netdev->flags;
1758 }
1759
Kiran Patil21659032015-09-30 14:09:03 -04001760 INIT_LIST_HEAD(&tmp_add_list);
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001761 INIT_LIST_HEAD(&tmp_del_list);
Kiran Patil21659032015-09-30 14:09:03 -04001762
Shannon Nelson2d1de822016-05-16 10:26:44 -07001763 if (vsi->type == I40E_VSI_SRIOV)
1764 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
1765 else if (vsi->type != I40E_VSI_MAIN)
1766 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
1767
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001768 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1769 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1770
Kiran Patil21659032015-09-30 14:09:03 -04001771 spin_lock_bh(&vsi->mac_filter_list_lock);
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001772 /* Create a list of filters to delete. */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001773 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001774 if (f->state == I40E_FILTER_REMOVE) {
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001775 /* Move the element into temporary del_list */
1776 list_move_tail(&f->list, &tmp_del_list);
1777 vsi->active_filters--;
Kiran Patil21659032015-09-30 14:09:03 -04001778 }
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001779 if (f->state == I40E_FILTER_NEW) {
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001780 /* Move the element into temporary add_list */
1781 list_move_tail(&f->list, &tmp_add_list);
1782 }
Kiran Patil21659032015-09-30 14:09:03 -04001783 }
1784 spin_unlock_bh(&vsi->mac_filter_list_lock);
Kiran Patil21659032015-09-30 14:09:03 -04001785 }
1786
1787 /* Now process 'del_list' outside the lock */
1788 if (!list_empty(&tmp_del_list)) {
Mitch Williams3e25a8f2016-05-16 10:26:32 -07001789 filter_list_len = hw->aq.asq_buf_size /
Kiran Patil21659032015-09-30 14:09:03 -04001790 sizeof(struct i40e_aqc_remove_macvlan_element_data);
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001791 list_size = filter_list_len *
Shannon Nelsonf1199992015-11-19 11:34:23 -08001792 sizeof(struct i40e_aqc_remove_macvlan_element_data);
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001793 del_list = kzalloc(list_size, GFP_ATOMIC);
Kiran Patil21659032015-09-30 14:09:03 -04001794 if (!del_list) {
Kiran Patil21659032015-09-30 14:09:03 -04001795 /* Undo VSI's MAC filter entry element updates */
1796 spin_lock_bh(&vsi->mac_filter_list_lock);
1797 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
Kiran Patil21659032015-09-30 14:09:03 -04001798 spin_unlock_bh(&vsi->mac_filter_list_lock);
Mitch Williamsea02e902015-11-09 15:35:50 -08001799 retval = -ENOMEM;
1800 goto out;
Kiran Patil21659032015-09-30 14:09:03 -04001801 }
1802
1803 list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001804 cmd_flags = 0;
1805
1806 /* add to delete list */
Greg Rose9a173902014-05-22 06:32:02 +00001807 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001808 if (f->vlan == I40E_VLAN_ANY) {
1809 del_list[num_del].vlan_tag = 0;
Alan Bradya6cb9142016-09-06 18:05:07 -07001810 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001811 } else {
1812 del_list[num_del].vlan_tag =
1813 cpu_to_le16((u16)(f->vlan));
1814 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001815
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001816 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1817 del_list[num_del].flags = cmd_flags;
1818 num_del++;
1819
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001820 /* flush a full buffer */
1821 if (num_del == filter_list_len) {
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001822 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid,
1823 del_list,
1824 num_del, NULL);
Mitch Williams3e25a8f2016-05-16 10:26:32 -07001825 aq_err = hw->aq.asq_last_status;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001826 num_del = 0;
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001827 memset(del_list, 0, list_size);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001828
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001829 /* Explicitly ignore and do not report when
1830 * firmware returns ENOENT.
1831 */
1832 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
Mitch Williamsea02e902015-11-09 15:35:50 -08001833 retval = -EIO;
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001834 dev_info(&pf->pdev->dev,
1835 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
Shannon Nelson2d1de822016-05-16 10:26:44 -07001836 vsi_name,
Mitch Williams3e25a8f2016-05-16 10:26:32 -07001837 i40e_stat_str(hw, aq_ret),
1838 i40e_aq_str(hw, aq_err));
Mitch Williamsea02e902015-11-09 15:35:50 -08001839 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001840 }
Kiran Patil21659032015-09-30 14:09:03 -04001841 /* Release memory for MAC filter entries which were
1842 * synced up with HW.
1843 */
1844 list_del(&f->list);
1845 kfree(f);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001846 }
Kiran Patil21659032015-09-30 14:09:03 -04001847
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001848 if (num_del) {
Mitch Williams3e25a8f2016-05-16 10:26:32 -07001849 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list,
1850 num_del, NULL);
1851 aq_err = hw->aq.asq_last_status;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001852 num_del = 0;
1853
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001854 /* Explicitly ignore and do not report when firmware
1855 * returns ENOENT.
1856 */
1857 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
1858 retval = -EIO;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001859 dev_info(&pf->pdev->dev,
Shannon Nelson2d1de822016-05-16 10:26:44 -07001860 "ignoring delete macvlan error on %s, err %s aq_err %s\n",
1861 vsi_name,
Mitch Williams3e25a8f2016-05-16 10:26:32 -07001862 i40e_stat_str(hw, aq_ret),
1863 i40e_aq_str(hw, aq_err));
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001864 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001865 }
1866
1867 kfree(del_list);
1868 del_list = NULL;
Kiran Patil21659032015-09-30 14:09:03 -04001869 }
1870
1871 if (!list_empty(&tmp_add_list)) {
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001872 /* Do all the adds now. */
Mitch Williams3e25a8f2016-05-16 10:26:32 -07001873 filter_list_len = hw->aq.asq_buf_size /
Shannon Nelsonf1199992015-11-19 11:34:23 -08001874 sizeof(struct i40e_aqc_add_macvlan_element_data);
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001875 list_size = filter_list_len *
1876 sizeof(struct i40e_aqc_add_macvlan_element_data);
1877 add_list = kzalloc(list_size, GFP_ATOMIC);
Kiran Patil21659032015-09-30 14:09:03 -04001878 if (!add_list) {
Mitch Williamsea02e902015-11-09 15:35:50 -08001879 retval = -ENOMEM;
1880 goto out;
Kiran Patil21659032015-09-30 14:09:03 -04001881 }
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001882 num_add = 0;
1883 list_for_each_entry(f, &tmp_add_list, list) {
1884 if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1885 &vsi->state)) {
1886 f->state = I40E_FILTER_FAILED;
1887 continue;
1888 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001889 /* add to add array */
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001890 if (num_add == 0)
1891 add_head = f;
1892 cmd_flags = 0;
Greg Rose9a173902014-05-22 06:32:02 +00001893 ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001894 if (f->vlan == I40E_VLAN_ANY) {
1895 add_list[num_add].vlan_tag = 0;
1896 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
1897 } else {
1898 add_list[num_add].vlan_tag =
1899 cpu_to_le16((u16)(f->vlan));
1900 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001901 add_list[num_add].queue_number = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001902 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001903 add_list[num_add].flags = cpu_to_le16(cmd_flags);
1904 num_add++;
1905
1906 /* flush a full buffer */
1907 if (num_add == filter_list_len) {
Mitch Williams3e25a8f2016-05-16 10:26:32 -07001908 aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
Mitch Williamsea02e902015-11-09 15:35:50 -08001909 add_list, num_add,
1910 NULL);
Mitch Williams3e25a8f2016-05-16 10:26:32 -07001911 aq_err = hw->aq.asq_last_status;
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001912 fcnt = i40e_update_filter_state(num_add,
1913 add_list,
1914 add_head,
1915 aq_ret);
1916 vsi->active_filters += fcnt;
1917
1918 if (fcnt != num_add) {
1919 promisc_changed = true;
1920 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1921 &vsi->state);
1922 vsi->promisc_threshold =
1923 (vsi->active_filters * 3) / 4;
1924 dev_warn(&pf->pdev->dev,
1925 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
1926 i40e_aq_str(hw, aq_err),
1927 vsi_name);
1928 }
1929 memset(add_list, 0, list_size);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001930 num_add = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001931 }
1932 }
1933 if (num_add) {
Mitch Williams3e25a8f2016-05-16 10:26:32 -07001934 aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
Mitch Williamsea02e902015-11-09 15:35:50 -08001935 add_list, num_add, NULL);
Mitch Williams3e25a8f2016-05-16 10:26:32 -07001936 aq_err = hw->aq.asq_last_status;
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001937 fcnt = i40e_update_filter_state(num_add, add_list,
1938 add_head, aq_ret);
1939 vsi->active_filters += fcnt;
1940 if (fcnt != num_add) {
1941 promisc_changed = true;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001942 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1943 &vsi->state);
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001944 vsi->promisc_threshold =
1945 (vsi->active_filters * 3) / 4;
1946 dev_warn(&pf->pdev->dev,
1947 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
1948 i40e_aq_str(hw, aq_err), vsi_name);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001949 }
1950 }
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07001951 /* Now move all of the filters from the temp add list back to
1952 * the VSI's list.
1953 */
1954 spin_lock_bh(&vsi->mac_filter_list_lock);
1955 list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
1956 list_move_tail(&f->list, &vsi->mac_filter_list);
1957 }
1958 spin_unlock_bh(&vsi->mac_filter_list_lock);
1959 kfree(add_list);
1960 add_list = NULL;
1961 }
1962
1963 /* Check to see if we can drop out of overflow promiscuous mode. */
1964 if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) &&
1965 (vsi->active_filters < vsi->promisc_threshold)) {
1966 int failed_count = 0;
1967 /* See if we have any failed filters. We can't drop out of
1968 * promiscuous until these have all been deleted.
1969 */
1970 spin_lock_bh(&vsi->mac_filter_list_lock);
1971 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1972 if (f->state == I40E_FILTER_FAILED)
1973 failed_count++;
1974 }
1975 spin_unlock_bh(&vsi->mac_filter_list_lock);
1976 if (!failed_count) {
1977 dev_info(&pf->pdev->dev,
1978 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
1979 vsi_name);
1980 clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
1981 promisc_changed = true;
1982 vsi->promisc_threshold = 0;
1983 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001984 }
1985
Anjali Singhai Jaina856b5c2016-04-13 03:08:23 -07001986 /* if the VF is not trusted do not do promisc */
1987 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
1988 clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
1989 goto out;
1990 }
1991
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001992 /* check for changes in promiscuous modes */
1993 if (changed_flags & IFF_ALLMULTI) {
1994 bool cur_multipromisc;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04001995
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001996 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
Mitch Williamsea02e902015-11-09 15:35:50 -08001997 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1998 vsi->seid,
1999 cur_multipromisc,
2000 NULL);
2001 if (aq_ret) {
2002 retval = i40e_aq_rc_to_posix(aq_ret,
Mitch Williams3e25a8f2016-05-16 10:26:32 -07002003 hw->aq.asq_last_status);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002004 dev_info(&pf->pdev->dev,
Shannon Nelson2d1de822016-05-16 10:26:44 -07002005 "set multi promisc failed on %s, err %s aq_err %s\n",
2006 vsi_name,
Mitch Williams3e25a8f2016-05-16 10:26:32 -07002007 i40e_stat_str(hw, aq_ret),
2008 i40e_aq_str(hw, hw->aq.asq_last_status));
Mitch Williamsea02e902015-11-09 15:35:50 -08002009 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002010 }
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07002011 if ((changed_flags & IFF_PROMISC) ||
2012 (promisc_changed &&
2013 test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002014 bool cur_promisc;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04002015
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002016 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2017 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2018 &vsi->state));
Anjali Singhai Jain6784ed52016-01-15 14:33:13 -08002019 if ((vsi->type == I40E_VSI_MAIN) &&
2020 (pf->lan_veb != I40E_NO_VEB) &&
2021 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
Anjali Singhai Jain92faef82015-07-28 13:02:00 -04002022			/* Set defport ON for the Main VSI instead of true promisc;
 2023			 * this way we will get all unicast/multicast and VLAN
 2024			 * promisc behavior but will not get VF or VMDq traffic
 2025			 * replicated on the Main VSI.
 2026			 */
2027 if (pf->cur_promisc != cur_promisc) {
2028 pf->cur_promisc = cur_promisc;
Mitch Williams5bc16032016-05-16 10:26:43 -07002029 if (cur_promisc)
2030 aq_ret =
2031 i40e_aq_set_default_vsi(hw,
2032 vsi->seid,
2033 NULL);
2034 else
2035 aq_ret =
2036 i40e_aq_clear_default_vsi(hw,
2037 vsi->seid,
2038 NULL);
2039 if (aq_ret) {
2040 retval = i40e_aq_rc_to_posix(aq_ret,
2041 hw->aq.asq_last_status);
2042 dev_info(&pf->pdev->dev,
Shannon Nelson2d1de822016-05-16 10:26:44 -07002043 "Set default VSI failed on %s, err %s, aq_err %s\n",
2044 vsi_name,
Mitch Williams5bc16032016-05-16 10:26:43 -07002045 i40e_stat_str(hw, aq_ret),
2046 i40e_aq_str(hw,
2047 hw->aq.asq_last_status));
2048 }
Anjali Singhai Jain92faef82015-07-28 13:02:00 -04002049 }
2050 } else {
Mitch Williamsea02e902015-11-09 15:35:50 -08002051 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
Mitch Williams3e25a8f2016-05-16 10:26:32 -07002052 hw,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04002053 vsi->seid,
Anjali Singhai Jainb5569892016-05-03 15:13:12 -07002054 cur_promisc, NULL,
2055 true);
Mitch Williamsea02e902015-11-09 15:35:50 -08002056 if (aq_ret) {
2057 retval =
2058 i40e_aq_rc_to_posix(aq_ret,
Mitch Williams3e25a8f2016-05-16 10:26:32 -07002059 hw->aq.asq_last_status);
Anjali Singhai Jain92faef82015-07-28 13:02:00 -04002060 dev_info(&pf->pdev->dev,
Shannon Nelson2d1de822016-05-16 10:26:44 -07002061 "set unicast promisc failed on %s, err %s, aq_err %s\n",
2062 vsi_name,
Mitch Williams3e25a8f2016-05-16 10:26:32 -07002063 i40e_stat_str(hw, aq_ret),
2064 i40e_aq_str(hw,
2065 hw->aq.asq_last_status));
Mitch Williamsea02e902015-11-09 15:35:50 -08002066 }
2067 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
Mitch Williams3e25a8f2016-05-16 10:26:32 -07002068 hw,
Anjali Singhai Jain92faef82015-07-28 13:02:00 -04002069 vsi->seid,
2070 cur_promisc, NULL);
Mitch Williamsea02e902015-11-09 15:35:50 -08002071 if (aq_ret) {
2072 retval =
2073 i40e_aq_rc_to_posix(aq_ret,
Mitch Williams3e25a8f2016-05-16 10:26:32 -07002074 hw->aq.asq_last_status);
Anjali Singhai Jain92faef82015-07-28 13:02:00 -04002075 dev_info(&pf->pdev->dev,
Shannon Nelson2d1de822016-05-16 10:26:44 -07002076 "set multicast promisc failed on %s, err %s, aq_err %s\n",
2077 vsi_name,
Mitch Williams3e25a8f2016-05-16 10:26:32 -07002078 i40e_stat_str(hw, aq_ret),
2079 i40e_aq_str(hw,
2080 hw->aq.asq_last_status));
Mitch Williamsea02e902015-11-09 15:35:50 -08002081 }
Anjali Singhai Jain92faef82015-07-28 13:02:00 -04002082 }
Mitch Williamsea02e902015-11-09 15:35:50 -08002083 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
2084 vsi->seid,
2085 cur_promisc, NULL);
2086 if (aq_ret) {
2087 retval = i40e_aq_rc_to_posix(aq_ret,
2088 pf->hw.aq.asq_last_status);
Greg Rose1a103702013-11-28 06:42:39 +00002089 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04002090 "set brdcast promisc failed, err %s, aq_err %s\n",
Mitch Williams3e25a8f2016-05-16 10:26:32 -07002091 i40e_stat_str(hw, aq_ret),
2092 i40e_aq_str(hw,
2093 hw->aq.asq_last_status));
Mitch Williamsea02e902015-11-09 15:35:50 -08002094 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002095 }
Mitch Williamsea02e902015-11-09 15:35:50 -08002096out:
Jesse Brandeburg2818ccd2016-01-13 16:51:38 -08002097 /* if something went wrong then set the changed flag so we try again */
2098 if (retval)
2099 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2100
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002101 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
Mitch Williamsea02e902015-11-09 15:35:50 -08002102 return retval;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002103}
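
/* Worked example (illustrative): if 40 filters were active when an add
 * batch overflowed the hardware table, promisc_threshold becomes
 * (40 * 3) / 4 = 30. Overflow promiscuous mode is then left only once the
 * number of active filters drops below 30 and no filter remains in the
 * I40E_FILTER_FAILED state.
 */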
2104
2105/**
2106 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2107 * @pf: board private structure
2108 **/
2109static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2110{
2111 int v;
2112
2113 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
2114 return;
2115 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
2116
Mitch Williams505682c2014-05-20 08:01:37 +00002117 for (v = 0; v < pf->num_alloc_vsi; v++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002118 if (pf->vsi[v] &&
Jesse Brandeburg17652c62015-11-05 17:01:02 -08002119 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2120 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2121
2122 if (ret) {
2123 /* come back and try again later */
2124 pf->flags |= I40E_FLAG_FILTER_SYNC;
2125 break;
2126 }
2127 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002128 }
2129}
2130
2131/**
2132 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2133 * @netdev: network interface device structure
2134 * @new_mtu: new value for maximum frame size
2135 *
2136 * Returns 0 on success, negative on failure
2137 **/
2138static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2139{
2140 struct i40e_netdev_priv *np = netdev_priv(netdev);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002141 struct i40e_vsi *vsi = np->vsi;
2142
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002143 netdev_info(netdev, "changing MTU from %d to %d\n",
2144 netdev->mtu, new_mtu);
2145 netdev->mtu = new_mtu;
2146 if (netif_running(netdev))
2147 i40e_vsi_reinit_locked(vsi);
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06002148 i40e_notify_client_of_l2_param_changes(vsi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002149 return 0;
2150}
2151
2152/**
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002153 * i40e_ioctl - Access the hwtstamp interface
2154 * @netdev: network interface device structure
2155 * @ifr: interface request data
2156 * @cmd: ioctl command
2157 **/
2158int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2159{
2160 struct i40e_netdev_priv *np = netdev_priv(netdev);
2161 struct i40e_pf *pf = np->vsi->back;
2162
2163 switch (cmd) {
2164 case SIOCGHWTSTAMP:
2165 return i40e_ptp_get_ts_config(pf, ifr);
2166 case SIOCSHWTSTAMP:
2167 return i40e_ptp_set_ts_config(pf, ifr);
2168 default:
2169 return -EOPNOTSUPP;
2170 }
2171}
2172
2173/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002174 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2175 * @vsi: the vsi being adjusted
2176 **/
2177void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2178{
2179 struct i40e_vsi_context ctxt;
2180 i40e_status ret;
2181
2182 if ((vsi->info.valid_sections &
2183 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2184 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2185 return; /* already enabled */
2186
2187 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2188 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2189 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2190
2191 ctxt.seid = vsi->seid;
Jesse Brandeburg1a2f6242015-03-31 00:45:01 -07002192 ctxt.info = vsi->info;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002193 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2194 if (ret) {
2195 dev_info(&vsi->back->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04002196 "update vlan stripping failed, err %s aq_err %s\n",
2197 i40e_stat_str(&vsi->back->hw, ret),
2198 i40e_aq_str(&vsi->back->hw,
2199 vsi->back->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002200 }
2201}
2202
2203/**
2204 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2205 * @vsi: the vsi being adjusted
2206 **/
2207void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2208{
2209 struct i40e_vsi_context ctxt;
2210 i40e_status ret;
2211
2212 if ((vsi->info.valid_sections &
2213 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2214 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2215 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2216 return; /* already disabled */
2217
2218 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2219 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2220 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2221
2222 ctxt.seid = vsi->seid;
Jesse Brandeburg1a2f6242015-03-31 00:45:01 -07002223 ctxt.info = vsi->info;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002224 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2225 if (ret) {
2226 dev_info(&vsi->back->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04002227 "update vlan stripping failed, err %s aq_err %s\n",
2228 i40e_stat_str(&vsi->back->hw, ret),
2229 i40e_aq_str(&vsi->back->hw,
2230 vsi->back->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002231 }
2232}
2233
2234/**
2235 * i40e_vlan_rx_register - Setup or shutdown vlan offload
2236 * @netdev: network interface to be adjusted
2237 * @features: netdev features to test if VLAN offload is enabled or not
2238 **/
2239static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2240{
2241 struct i40e_netdev_priv *np = netdev_priv(netdev);
2242 struct i40e_vsi *vsi = np->vsi;
2243
2244 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2245 i40e_vlan_stripping_enable(vsi);
2246 else
2247 i40e_vlan_stripping_disable(vsi);
2248}
2249
2250/**
2251 * i40e_vsi_add_vlan - Add vsi membership for given vlan
2252 * @vsi: the vsi being configured
2253 * @vid: vlan id to be added (0 = untagged only , -1 = any)
2254 **/
2255int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
2256{
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07002257 struct i40e_mac_filter *f, *ftmp, *add_f;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002258
Kiran Patil21659032015-09-30 14:09:03 -04002259	/* Locked once because all the functions invoked below iterate the list */
2260 spin_lock_bh(&vsi->mac_filter_list_lock);
2261
Jacob Keller1bc87e82016-10-05 09:30:31 -07002262 if (vsi->netdev) {
2263 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002264 if (!add_f) {
2265 dev_info(&vsi->back->pdev->dev,
2266 "Could not add vlan filter %d for %pM\n",
2267 vid, vsi->netdev->dev_addr);
Kiran Patil21659032015-09-30 14:09:03 -04002268 spin_unlock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002269 return -ENOMEM;
2270 }
2271 }
2272
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07002273 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
Jacob Keller1bc87e82016-10-05 09:30:31 -07002274 add_f = i40e_add_filter(vsi, f->macaddr, vid);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002275 if (!add_f) {
2276 dev_info(&vsi->back->pdev->dev,
2277 "Could not add vlan filter %d for %pM\n",
2278 vid, f->macaddr);
Kiran Patil21659032015-09-30 14:09:03 -04002279 spin_unlock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002280 return -ENOMEM;
2281 }
2282 }
2283
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002284	/* If we are adding a real vlan tag, check whether an "any vlan" (-1)
 2285	 * filter exists and, if so, replace it with a vlan 0 filter. The VSI
 2286	 * then accepts untagged traffic plus the specifically requested tags,
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07002287	 * rather than every tag along with untagged.
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002288	 */
2289 if (vid > 0) {
Jacob Keller1bc87e82016-10-05 09:30:31 -07002290 if (vsi->netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
2291 I40E_VLAN_ANY)) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002292 i40e_del_filter(vsi, vsi->netdev->dev_addr,
Jacob Keller1bc87e82016-10-05 09:30:31 -07002293 I40E_VLAN_ANY);
2294 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002295 if (!add_f) {
2296 dev_info(&vsi->back->pdev->dev,
2297 "Could not add filter 0 for %pM\n",
2298 vsi->netdev->dev_addr);
Kiran Patil21659032015-09-30 14:09:03 -04002299 spin_unlock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002300 return -ENOMEM;
2301 }
2302 }
Greg Rose8d82a7c2014-01-13 16:13:04 -08002303 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002304
Greg Rose8d82a7c2014-01-13 16:13:04 -08002305 /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
2306 if (vid > 0 && !vsi->info.pvid) {
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07002307 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
Jacob Keller1bc87e82016-10-05 09:30:31 -07002308 if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY))
Kiran Patil21659032015-09-30 14:09:03 -04002309 continue;
Jacob Keller1bc87e82016-10-05 09:30:31 -07002310 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY);
2311 add_f = i40e_add_filter(vsi, f->macaddr, 0);
Kiran Patil21659032015-09-30 14:09:03 -04002312 if (!add_f) {
2313 dev_info(&vsi->back->pdev->dev,
2314 "Could not add filter 0 for %pM\n",
2315 f->macaddr);
2316 spin_unlock_bh(&vsi->mac_filter_list_lock);
2317 return -ENOMEM;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002318 }
2319 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002320 }
2321
Kiran Patil21659032015-09-30 14:09:03 -04002322 spin_unlock_bh(&vsi->mac_filter_list_lock);
2323
Jesse Brandeburg0e4425e2015-11-05 17:01:01 -08002324 /* schedule our worker thread which will take care of
2325 * applying the new filter changes
2326 */
2327 i40e_service_event_schedule(vsi->back);
2328 return 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002329}
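
/* Worked example (illustrative): a VSI carrying only MAC/I40E_VLAN_ANY
 * (-1) filters that is asked to add vid 5 ends up with one filter per MAC
 * for vid 5 and one for vid 0; the -1 filters are removed. Untagged and
 * vid 5 frames are accepted, every other tag is dropped.
 */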
2330
2331/**
2332 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
2333 * @vsi: the vsi being configured
2334 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002335 *
2336 * Return: 0 on success or negative otherwise
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002337 **/
2338int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
2339{
2340 struct net_device *netdev = vsi->netdev;
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07002341 struct i40e_mac_filter *f, *ftmp, *add_f;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002342 int filter_count = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002343
Kiran Patil21659032015-09-30 14:09:03 -04002344	/* Locked once because all the functions invoked below iterate the list */
2345 spin_lock_bh(&vsi->mac_filter_list_lock);
2346
Jacob Keller1bc87e82016-10-05 09:30:31 -07002347 if (vsi->netdev)
2348 i40e_del_filter(vsi, netdev->dev_addr, vid);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002349
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07002350 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
Jacob Keller1bc87e82016-10-05 09:30:31 -07002351 i40e_del_filter(vsi, f->macaddr, vid);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002352
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002353	/* Go through all the filters for this VSI; if only vid 0 filters
 2354	 * remain, there are no other tagged filters left, so vid 0 must be
 2355	 * replaced with -1. From then on the VSI accepts any traffic, with
 2356	 * any tag present or untagged.
 2357	 */
2358 list_for_each_entry(f, &vsi->mac_filter_list, list) {
Jacob Keller1bc87e82016-10-05 09:30:31 -07002359 if (vsi->netdev) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002360 if (f->vlan &&
2361 ether_addr_equal(netdev->dev_addr, f->macaddr))
2362 filter_count++;
2363 }
2364
2365 if (f->vlan)
2366 filter_count++;
2367 }
2368
Jacob Keller1bc87e82016-10-05 09:30:31 -07002369 if (!filter_count && vsi->netdev) {
2370 i40e_del_filter(vsi, netdev->dev_addr, 0);
2371 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002372 if (!f) {
2373 dev_info(&vsi->back->pdev->dev,
2374 "Could not add filter %d for %pM\n",
2375 I40E_VLAN_ANY, netdev->dev_addr);
Kiran Patil21659032015-09-30 14:09:03 -04002376 spin_unlock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002377 return -ENOMEM;
2378 }
2379 }
2380
2381 if (!filter_count) {
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07002382 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
Jacob Keller1bc87e82016-10-05 09:30:31 -07002383 i40e_del_filter(vsi, f->macaddr, 0);
2384 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002385 if (!add_f) {
2386 dev_info(&vsi->back->pdev->dev,
2387 "Could not add filter %d for %pM\n",
2388 I40E_VLAN_ANY, f->macaddr);
Kiran Patil21659032015-09-30 14:09:03 -04002389 spin_unlock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002390 return -ENOMEM;
2391 }
2392 }
2393 }
2394
Kiran Patil21659032015-09-30 14:09:03 -04002395 spin_unlock_bh(&vsi->mac_filter_list_lock);
2396
Jesse Brandeburg0e4425e2015-11-05 17:01:01 -08002397 /* schedule our worker thread which will take care of
2398 * applying the new filter changes
2399 */
2400 i40e_service_event_schedule(vsi->back);
2401 return 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002402}
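
/* Worked example (illustrative): continuing the example above, killing
 * vid 5 leaves only the vid 0 filters; since no tagged filter remains,
 * each vid 0 filter is replaced with an I40E_VLAN_ANY (-1) filter and the
 * VSI goes back to accepting any traffic, tagged or untagged.
 */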
2403
2404/**
2405 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2406 * @netdev: network interface to be adjusted
2407 * @vid: vlan id to be added
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002408 *
2409 * net_device_ops implementation for adding vlan ids
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002410 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002411#ifdef I40E_FCOE
2412int i40e_vlan_rx_add_vid(struct net_device *netdev,
2413 __always_unused __be16 proto, u16 vid)
2414#else
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002415static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2416 __always_unused __be16 proto, u16 vid)
Vasu Dev38e00432014-08-01 13:27:03 -07002417#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002418{
2419 struct i40e_netdev_priv *np = netdev_priv(netdev);
2420 struct i40e_vsi *vsi = np->vsi;
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002421 int ret = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002422
2423 if (vid > 4095)
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002424 return -EINVAL;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002425
Anjali Singhai Jain6982d422014-02-06 05:51:10 +00002426 /* If the network stack called us with vid = 0 then
2427 * it is asking to receive priority tagged packets with
2428 * vlan id 0. Our HW receives them by default when configured
2429 * to receive untagged packets so there is no need to add an
2430 * extra filter for vlan 0 tagged packets.
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002431 */
Anjali Singhai Jain6982d422014-02-06 05:51:10 +00002432 if (vid)
2433 ret = i40e_vsi_add_vlan(vsi, vid);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002434
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002435 if (!ret && (vid < VLAN_N_VID))
2436 set_bit(vid, vsi->active_vlans);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002437
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002438 return ret;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002439}
2440
2441/**
2442 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2443 * @netdev: network interface to be adjusted
2444 * @vid: vlan id to be removed
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002445 *
Akeem G Abodunrinfdfd9432014-02-11 08:24:15 +00002446 * net_device_ops implementation for removing vlan ids
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002447 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002448#ifdef I40E_FCOE
2449int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2450 __always_unused __be16 proto, u16 vid)
2451#else
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002452static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2453 __always_unused __be16 proto, u16 vid)
Vasu Dev38e00432014-08-01 13:27:03 -07002454#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002455{
2456 struct i40e_netdev_priv *np = netdev_priv(netdev);
2457 struct i40e_vsi *vsi = np->vsi;
2458
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002459	/* The return code is ignored as there is nothing a user
 2460	 * can do about a failure to remove and a log message was
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002461	 * already printed by i40e_vsi_kill_vlan()
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002462 */
2463 i40e_vsi_kill_vlan(vsi, vid);
2464
2465 clear_bit(vid, vsi->active_vlans);
Jesse Brandeburg078b5872013-09-25 23:41:14 +00002466
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002467 return 0;
2468}
2469
2470/**
Tushar Daveb1b15df2016-07-01 10:11:20 -07002471 * i40e_macaddr_init - explicitly write the mac address filters
 2472 * @vsi: pointer to the vsi
2474 * @macaddr: the MAC address
2475 *
2476 * This is needed when the macaddr has been obtained by other
2477 * means than the default, e.g., from Open Firmware or IDPROM.
2478 * Returns 0 on success, negative on failure
2479 **/
2480static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
2481{
2482 int ret;
2483 struct i40e_aqc_add_macvlan_element_data element;
2484
2485 ret = i40e_aq_mac_address_write(&vsi->back->hw,
2486 I40E_AQC_WRITE_TYPE_LAA_WOL,
2487 macaddr, NULL);
2488 if (ret) {
2489 dev_info(&vsi->back->pdev->dev,
2490 "Addr change for VSI failed: %d\n", ret);
2491 return -EADDRNOTAVAIL;
2492 }
2493
2494 memset(&element, 0, sizeof(element));
2495 ether_addr_copy(element.mac_addr, macaddr);
2496 element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
2497 ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
2498 if (ret) {
2499 dev_info(&vsi->back->pdev->dev,
2500 "add filter failed err %s aq_err %s\n",
2501 i40e_stat_str(&vsi->back->hw, ret),
2502 i40e_aq_str(&vsi->back->hw,
2503 vsi->back->hw.aq.asq_last_status));
2504 }
2505 return ret;
2506}
2507
2508/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002509 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2510 * @vsi: the vsi being brought back up
2511 **/
2512static void i40e_restore_vlan(struct i40e_vsi *vsi)
2513{
2514 u16 vid;
2515
2516 if (!vsi->netdev)
2517 return;
2518
2519 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2520
2521 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2522 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2523 vid);
2524}
2525
2526/**
2527 * i40e_vsi_add_pvid - Add pvid for the VSI
2528 * @vsi: the vsi being adjusted
2529 * @vid: the vlan id to set as a PVID
2530 **/
Jesse Brandeburgdcae29b2013-09-13 08:23:20 +00002531int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002532{
2533 struct i40e_vsi_context ctxt;
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04002534 i40e_status ret;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002535
2536 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2537 vsi->info.pvid = cpu_to_le16(vid);
Greg Rose6c12fcb2013-11-28 06:39:34 +00002538 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2539 I40E_AQ_VSI_PVLAN_INSERT_PVID |
Greg Roseb774c7d2013-11-28 06:39:44 +00002540 I40E_AQ_VSI_PVLAN_EMOD_STR;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002541
2542 ctxt.seid = vsi->seid;
Jesse Brandeburg1a2f6242015-03-31 00:45:01 -07002543 ctxt.info = vsi->info;
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04002544 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2545 if (ret) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002546 dev_info(&vsi->back->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04002547 "add pvid failed, err %s aq_err %s\n",
2548 i40e_stat_str(&vsi->back->hw, ret),
2549 i40e_aq_str(&vsi->back->hw,
2550 vsi->back->hw.aq.asq_last_status));
Jesse Brandeburgdcae29b2013-09-13 08:23:20 +00002551 return -ENOENT;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002552 }
2553
Jesse Brandeburgdcae29b2013-09-13 08:23:20 +00002554 return 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002555}
2556
2557/**
2558 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2559 * @vsi: the vsi being adjusted
2560 *
2561 * Just use the vlan_rx_register() service to put it back to normal
2562 **/
2563void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2564{
Greg Rose6c12fcb2013-11-28 06:39:34 +00002565 i40e_vlan_stripping_disable(vsi);
2566
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002567 vsi->info.pvid = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002568}
2569
2570/**
2571 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2572 * @vsi: ptr to the VSI
2573 *
2574 * If this function returns with an error, then it's possible one or
2575 * more of the rings is populated (while the rest are not). It is the
 2576 * caller's duty to clean those orphaned rings.
2577 *
2578 * Return 0 on success, negative on failure
2579 **/
2580static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2581{
2582 int i, err = 0;
2583
2584 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
Alexander Duyck9f65e152013-09-28 06:00:58 +00002585 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002586
2587 return err;
2588}
2589
2590/**
2591 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2592 * @vsi: ptr to the VSI
2593 *
2594 * Free VSI's transmit software resources
2595 **/
2596static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2597{
2598 int i;
2599
Greg Rose8e9dca52013-12-18 13:45:53 +00002600 if (!vsi->tx_rings)
2601 return;
2602
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002603 for (i = 0; i < vsi->num_queue_pairs; i++)
Greg Rose8e9dca52013-12-18 13:45:53 +00002604 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
Alexander Duyck9f65e152013-09-28 06:00:58 +00002605 i40e_free_tx_resources(vsi->tx_rings[i]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002606}
2607
2608/**
2609 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2610 * @vsi: ptr to the VSI
2611 *
2612 * If this function returns with an error, then it's possible one or
2613 * more of the rings is populated (while the rest are not). It is the
2614 * caller's duty to clean those orphaned rings.
2615 *
2616 * Return 0 on success, negative on failure
2617 **/
2618static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2619{
2620 int i, err = 0;
2621
2622 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
Alexander Duyck9f65e152013-09-28 06:00:58 +00002623 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
Vasu Dev38e00432014-08-01 13:27:03 -07002624#ifdef I40E_FCOE
2625 i40e_fcoe_setup_ddp_resources(vsi);
2626#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002627 return err;
2628}
2629
2630/**
2631 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2632 * @vsi: ptr to the VSI
2633 *
2634 * Free all receive software resources
2635 **/
2636static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2637{
2638 int i;
2639
Greg Rose8e9dca52013-12-18 13:45:53 +00002640 if (!vsi->rx_rings)
2641 return;
2642
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002643 for (i = 0; i < vsi->num_queue_pairs; i++)
Greg Rose8e9dca52013-12-18 13:45:53 +00002644 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
Alexander Duyck9f65e152013-09-28 06:00:58 +00002645 i40e_free_rx_resources(vsi->rx_rings[i]);
Vasu Dev38e00432014-08-01 13:27:03 -07002646#ifdef I40E_FCOE
2647 i40e_fcoe_free_ddp_resources(vsi);
2648#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002649}
2650
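/* Illustrative sketch, not part of the driver: the clean-up contract the
 * comments above describe. If either setup helper fails part-way, the
 * caller frees both Tx and Rx resources; that is safe because the free
 * helpers skip rings that never had descriptors allocated.
 */
static int __maybe_unused i40e_example_vsi_setup_rings(struct i40e_vsi *vsi)
{
	int err;

	err = i40e_vsi_setup_tx_resources(vsi);
	if (!err)
		err = i40e_vsi_setup_rx_resources(vsi);

	if (err) {
		/* clean any orphaned rings left by a partial allocation */
		i40e_vsi_free_tx_resources(vsi);
		i40e_vsi_free_rx_resources(vsi);
	}
	return err;
}
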
2651/**
Neerav Parikh3ffa0372014-11-12 00:19:02 +00002652 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2653 * @ring: The Tx ring to configure
2654 *
2655 * This enables/disables XPS for a given Tx descriptor ring
2656 * based on the TCs enabled for the VSI that ring belongs to.
2657 **/
2658static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2659{
2660 struct i40e_vsi *vsi = ring->vsi;
2661 cpumask_var_t mask;
2662
Jesse Brandeburg9a660ee2015-02-26 16:13:22 +00002663 if (!ring->q_vector || !ring->netdev)
2664 return;
2665
2666 /* In single TC mode, enable XPS */
2667 if (vsi->tc_config.numtc <= 1) {
2668 if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
Neerav Parikh3ffa0372014-11-12 00:19:02 +00002669 netif_set_xps_queue(ring->netdev,
2670 &ring->q_vector->affinity_mask,
2671 ring->queue_index);
Jesse Brandeburg9a660ee2015-02-26 16:13:22 +00002672 } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2673 /* Disable XPS to allow selection based on TC */
2674 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2675 netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
2676 free_cpumask_var(mask);
Neerav Parikh3ffa0372014-11-12 00:19:02 +00002677 }
Jesse Brandeburg0e4425e2015-11-05 17:01:01 -08002678
2679 /* schedule our worker thread which will take care of
2680 * applying the new filter changes
2681 */
2682 i40e_service_event_schedule(vsi->back);
Neerav Parikh3ffa0372014-11-12 00:19:02 +00002683}
2684
2685/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002686 * i40e_configure_tx_ring - Configure a transmit ring context
2687 * @ring: The Tx ring to configure
2688 *
2689 * Configure the Tx descriptor ring in the HMC context.
2690 **/
2691static int i40e_configure_tx_ring(struct i40e_ring *ring)
2692{
2693 struct i40e_vsi *vsi = ring->vsi;
2694 u16 pf_q = vsi->base_queue + ring->queue_index;
2695 struct i40e_hw *hw = &vsi->back->hw;
2696 struct i40e_hmc_obj_txq tx_ctx;
2697 i40e_status err = 0;
2698 u32 qtx_ctl = 0;
2699
2700 /* some ATR related tx ring init */
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -08002701 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002702 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2703 ring->atr_count = 0;
2704 } else {
2705 ring->atr_sample_rate = 0;
2706 }
2707
Neerav Parikh3ffa0372014-11-12 00:19:02 +00002708 /* configure XPS */
2709 i40e_config_xps_tx_ring(ring);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002710
2711 /* clear the context structure first */
2712 memset(&tx_ctx, 0, sizeof(tx_ctx));
2713
2714 tx_ctx.new_context = 1;
2715 tx_ctx.base = (ring->dma / 128);
2716 tx_ctx.qlen = ring->count;
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -08002717 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2718 I40E_FLAG_FD_ATR_ENABLED));
Vasu Dev38e00432014-08-01 13:27:03 -07002719#ifdef I40E_FCOE
2720 tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2721#endif
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002722 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
Jesse Brandeburg1943d8b2014-02-14 02:14:40 +00002723 /* FDIR VSI tx ring can still use RS bit and writebacks */
2724 if (vsi->type != I40E_VSI_FDIR)
2725 tx_ctx.head_wb_ena = 1;
2726 tx_ctx.head_wb_addr = ring->dma +
2727 (ring->count * sizeof(struct i40e_tx_desc));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002728
2729 /* As part of VSI creation/update, FW allocates certain
2730 * Tx arbitration queue sets for each TC enabled for
2731 * the VSI. The FW returns the handles to these queue
2732 * sets as part of the response buffer to Add VSI,
2733 * Update VSI, etc. AQ commands. It is expected that
2734 * these queue set handles be associated with the Tx
2735 * queues by the driver as part of the TX queue context
2736 * initialization. This has to be done regardless of
2737 * DCB as by default everything is mapped to TC0.
2738 */
2739 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2740 tx_ctx.rdylist_act = 0;
2741
2742 /* clear the context in the HMC */
2743 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2744 if (err) {
2745 dev_info(&vsi->back->pdev->dev,
2746 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2747 ring->queue_index, pf_q, err);
2748 return -ENOMEM;
2749 }
2750
2751 /* set the context in the HMC */
2752 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2753 if (err) {
2754 dev_info(&vsi->back->pdev->dev,
2755 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2756 ring->queue_index, pf_q, err);
2757 return -ENOMEM;
2758 }
2759
2760 /* Now associate this queue with this PCI function */
Mitch Williams7a28d882014-10-17 03:14:52 +00002761 if (vsi->type == I40E_VSI_VMDQ2) {
Shannon Nelson9d8bf542014-01-14 00:49:50 -08002762 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
Mitch Williams7a28d882014-10-17 03:14:52 +00002763 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2764 I40E_QTX_CTL_VFVM_INDX_MASK;
2765 } else {
Shannon Nelson9d8bf542014-01-14 00:49:50 -08002766 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
Mitch Williams7a28d882014-10-17 03:14:52 +00002767 }
2768
Shannon Nelson13fd9772013-09-28 07:14:19 +00002769 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2770 I40E_QTX_CTL_PF_INDX_MASK);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002771 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2772 i40e_flush(hw);
2773
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002774 /* cache tail off for easier writes later */
2775 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2776
2777 return 0;
2778}
2779
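/* Illustrative helper, an assumption rather than driver code: the head
 * writeback area programmed above sits immediately after the descriptor
 * ring in DMA memory, so its address is simply base + count * descriptor
 * size, mirroring the tx_ctx.head_wb_addr computation.
 */
static dma_addr_t __maybe_unused i40e_example_head_wb_addr(struct i40e_ring *ring)
{
	return ring->dma + (ring->count * sizeof(struct i40e_tx_desc));
}
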
2780/**
2781 * i40e_configure_rx_ring - Configure a receive ring context
2782 * @ring: The Rx ring to configure
2783 *
2784 * Configure the Rx descriptor ring in the HMC context.
2785 **/
2786static int i40e_configure_rx_ring(struct i40e_ring *ring)
2787{
2788 struct i40e_vsi *vsi = ring->vsi;
2789 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2790 u16 pf_q = vsi->base_queue + ring->queue_index;
2791 struct i40e_hw *hw = &vsi->back->hw;
2792 struct i40e_hmc_obj_rxq rx_ctx;
2793 i40e_status err = 0;
2794
2795 ring->state = 0;
2796
2797 /* clear the context structure first */
2798 memset(&rx_ctx, 0, sizeof(rx_ctx));
2799
2800 ring->rx_buf_len = vsi->rx_buf_len;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002801
2802 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002803
2804 rx_ctx.base = (ring->dma / 128);
2805 rx_ctx.qlen = ring->count;
2806
Jesse Brandeburgbec60fc2016-04-18 11:33:47 -07002807 /* use 32 byte descriptors */
2808 rx_ctx.dsize = 1;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002809
Jesse Brandeburgbec60fc2016-04-18 11:33:47 -07002810 /* descriptor type is always zero
2811 * rx_ctx.dtype = 0;
2812 */
Jesse Brandeburgb32bfa172016-04-18 11:33:42 -07002813 rx_ctx.hsplit_0 = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002814
Jesse Brandeburgb32bfa172016-04-18 11:33:42 -07002815 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
Jesse Brandeburg7134f9c2013-11-26 08:56:05 +00002816 if (hw->revision_id == 0)
2817 rx_ctx.lrxqthresh = 0;
2818 else
2819 rx_ctx.lrxqthresh = 2;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002820 rx_ctx.crcstrip = 1;
2821 rx_ctx.l2tsel = 1;
Jesse Brandeburgc4bbac32015-09-28 11:21:48 -07002822 /* this controls whether VLAN is stripped from inner headers */
2823 rx_ctx.showiv = 0;
Vasu Dev38e00432014-08-01 13:27:03 -07002824#ifdef I40E_FCOE
2825 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2826#endif
Catherine Sullivanacb36762014-03-06 09:02:30 +00002827 /* set the prefena field to 1 because the manual says to */
2828 rx_ctx.prefena = 1;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002829
2830 /* clear the context in the HMC */
2831 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2832 if (err) {
2833 dev_info(&vsi->back->pdev->dev,
2834 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2835 ring->queue_index, pf_q, err);
2836 return -ENOMEM;
2837 }
2838
2839 /* set the context in the HMC */
2840 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2841 if (err) {
2842 dev_info(&vsi->back->pdev->dev,
2843 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2844 ring->queue_index, pf_q, err);
2845 return -ENOMEM;
2846 }
2847
2848 /* cache tail for quicker writes, and clear the reg before use */
2849 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2850 writel(0, ring->tail);
2851
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002852 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002853
2854 return 0;
2855}
2856
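/* Worked sketch (illustrative only): rx_ctx.rxmax above caps the largest
 * receive at the smaller of the VSI max frame and what the buffer chain
 * can hold. With 2048-byte buffers, a chain length of 5 (an assumed
 * value) allows 10240 bytes, so a 9022-byte jumbo max_frame is the
 * effective limit; with a chain length of 1 the single 2048-byte buffer
 * is the limit instead. The helper name is hypothetical.
 */
static u16 __maybe_unused i40e_example_rxmax(u16 max_frame, u16 buf_len,
					     u32 chain_len)
{
	return min_t(u16, max_frame, chain_len * buf_len);
}
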
2857/**
2858 * i40e_vsi_configure_tx - Configure the VSI for Tx
2859 * @vsi: VSI structure describing this set of rings and resources
2860 *
2861 * Configure the Tx VSI for operation.
2862 **/
2863static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2864{
2865 int err = 0;
2866 u16 i;
2867
Alexander Duyck9f65e152013-09-28 06:00:58 +00002868 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2869 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002870
2871 return err;
2872}
2873
2874/**
2875 * i40e_vsi_configure_rx - Configure the VSI for Rx
2876 * @vsi: the VSI being configured
2877 *
2878 * Configure the Rx VSI for operation.
2879 **/
2880static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2881{
2882 int err = 0;
2883 u16 i;
2884
2885 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2886 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN +
2887 ETH_FCS_LEN + VLAN_HLEN;
2888 else
2889 vsi->max_frame = I40E_RXBUFFER_2048;
2890
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002891 vsi->rx_buf_len = I40E_RXBUFFER_2048;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002892
Vasu Dev38e00432014-08-01 13:27:03 -07002893#ifdef I40E_FCOE
2894 /* setup rx buffer for FCoE */
2895 if ((vsi->type == I40E_VSI_FCOE) &&
2896 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
Vasu Dev38e00432014-08-01 13:27:03 -07002897 vsi->rx_buf_len = I40E_RXBUFFER_3072;
2898 vsi->max_frame = I40E_RXBUFFER_3072;
Vasu Dev38e00432014-08-01 13:27:03 -07002899 }
2900
2901#endif /* I40E_FCOE */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002902 /* round up for the chip's needs */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002903 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04002904 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002905
2906 /* set up individual rings */
2907 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
Alexander Duyck9f65e152013-09-28 06:00:58 +00002908 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002909
2910 return err;
2911}
2912
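/* Worked sketch (illustrative only): the sizing above for a netdev MTU of
 * 9000 gives max_frame = 9000 + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN = 9022,
 * while rx_buf_len stays at I40E_RXBUFFER_2048; ALIGN() then rounds the
 * buffer length up to the chip's 128-byte granularity
 * (BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)), which 2048 already satisfies.
 * The helper name is hypothetical and only restates that rounding.
 */
static u16 __maybe_unused i40e_example_round_rx_buf_len(u16 len)
{
	return ALIGN(len, BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
}
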
2913/**
2914 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2915 * @vsi: ptr to the VSI
2916 **/
2917static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2918{
Akeem G Abodunrine7046ee2014-04-09 05:58:58 +00002919 struct i40e_ring *tx_ring, *rx_ring;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002920 u16 qoffset, qcount;
2921 int i, n;
2922
Parikh, Neeravcd238a32015-02-21 06:43:37 +00002923 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2924 /* Reset the TC information */
2925 for (i = 0; i < vsi->num_queue_pairs; i++) {
2926 rx_ring = vsi->rx_rings[i];
2927 tx_ring = vsi->tx_rings[i];
2928 rx_ring->dcb_tc = 0;
2929 tx_ring->dcb_tc = 0;
2930 }
2931 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002932
2933 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04002934 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002935 continue;
2936
2937 qoffset = vsi->tc_config.tc_info[n].qoffset;
2938 qcount = vsi->tc_config.tc_info[n].qcount;
2939 for (i = qoffset; i < (qoffset + qcount); i++) {
Akeem G Abodunrine7046ee2014-04-09 05:58:58 +00002940 rx_ring = vsi->rx_rings[i];
2941 tx_ring = vsi->tx_rings[i];
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002942 rx_ring->dcb_tc = n;
2943 tx_ring->dcb_tc = n;
2944 }
2945 }
2946}
2947
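/* Worked sketch (illustrative only): with two TCs where TC0 owns queues
 * 0-3 (qoffset 0, qcount 4) and TC1 owns queues 4-7 (qoffset 4, qcount 4),
 * the loop above tags rings 0-3 with dcb_tc = 0 and rings 4-7 with
 * dcb_tc = 1. The hypothetical helper below restates that mapping for a
 * single queue index.
 */
static int __maybe_unused i40e_example_queue_tc(struct i40e_vsi *vsi, u16 queue)
{
	int n;

	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
		u16 qoffset = vsi->tc_config.tc_info[n].qoffset;
		u16 qcount = vsi->tc_config.tc_info[n].qcount;

		if ((vsi->tc_config.enabled_tc & BIT_ULL(n)) &&
		    queue >= qoffset && queue < (qoffset + qcount))
			return n;
	}

	return 0;	/* everything defaults to TC0 */
}
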
2948/**
2949 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2950 * @vsi: ptr to the VSI
2951 **/
2952static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2953{
Tushar Daveb1b15df2016-07-01 10:11:20 -07002954 struct i40e_pf *pf = vsi->back;
2955 int err;
2956
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002957 if (vsi->netdev)
2958 i40e_set_rx_mode(vsi->netdev);
Tushar Daveb1b15df2016-07-01 10:11:20 -07002959
2960 if (!!(pf->flags & I40E_FLAG_PF_MAC)) {
2961 err = i40e_macaddr_init(vsi, pf->hw.mac.addr);
2962 if (err) {
2963 dev_warn(&pf->pdev->dev,
2964 "could not set up macaddr; err %d\n", err);
2965 }
2966 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002967}
2968
2969/**
Joseph Gasparakis17a73f62014-02-12 01:45:30 +00002970 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2971 * @vsi: Pointer to the targeted VSI
2972 *
2973 * This function replays the hlist on the hw where all the SB Flow Director
2974 * filters were saved.
2975 **/
2976static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2977{
2978 struct i40e_fdir_filter *filter;
2979 struct i40e_pf *pf = vsi->back;
2980 struct hlist_node *node;
2981
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002982 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2983 return;
2984
Joseph Gasparakis17a73f62014-02-12 01:45:30 +00002985 hlist_for_each_entry_safe(filter, node,
2986 &pf->fdir_filter_list, fdir_node) {
2987 i40e_add_del_fdir(vsi, filter, true);
2988 }
2989}
2990
2991/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00002992 * i40e_vsi_configure - Set up the VSI for action
2993 * @vsi: the VSI being configured
2994 **/
2995static int i40e_vsi_configure(struct i40e_vsi *vsi)
2996{
2997 int err;
2998
2999 i40e_set_vsi_rx_mode(vsi);
3000 i40e_restore_vlan(vsi);
3001 i40e_vsi_config_dcb_rings(vsi);
3002 err = i40e_vsi_configure_tx(vsi);
3003 if (!err)
3004 err = i40e_vsi_configure_rx(vsi);
3005
3006 return err;
3007}
3008
3009/**
3010 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3011 * @vsi: the VSI being configured
3012 **/
3013static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3014{
3015 struct i40e_pf *pf = vsi->back;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003016 struct i40e_hw *hw = &pf->hw;
3017 u16 vector;
3018 int i, q;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003019 u32 qp;
3020
3021 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3022 * and PFINT_LNKLSTn registers, e.g.:
3023 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3024 */
3025 qp = vsi->base_queue;
3026 vector = vsi->base_vector;
Alexander Duyck493fb302013-09-28 07:01:44 +00003027 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
Jesse Brandeburgac26fc12015-09-28 14:12:37 -04003028 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3029
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04003030 q_vector->itr_countdown = ITR_COUNTDOWN_START;
Kan Lianga75e8002016-02-19 09:24:04 -05003031 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003032 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3033 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3034 q_vector->rx.itr);
Kan Lianga75e8002016-02-19 09:24:04 -05003035 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003036 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3037 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3038 q_vector->tx.itr);
Jesse Brandeburgac26fc12015-09-28 14:12:37 -04003039 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3040 INTRL_USEC_TO_REG(vsi->int_rate_limit));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003041
3042 /* Linked list for the queuepairs assigned to this vector */
3043 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3044 for (q = 0; q < q_vector->num_ringpairs; q++) {
Jesse Brandeburgac26fc12015-09-28 14:12:37 -04003045 u32 val;
3046
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003047 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3048 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3049 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3050 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3051 (I40E_QUEUE_TYPE_TX
3052 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3053
3054 wr32(hw, I40E_QINT_RQCTL(qp), val);
3055
3056 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3057 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3058 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3059 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3060 (I40E_QUEUE_TYPE_RX
3061 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3062
3063 /* Terminate the linked list */
3064 if (q == (q_vector->num_ringpairs - 1))
3065 val |= (I40E_QUEUE_END_OF_LIST
3066 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3067
3068 wr32(hw, I40E_QINT_TQCTL(qp), val);
3069 qp++;
3070 }
3071 }
3072
3073 i40e_flush(hw);
3074}
3075
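/* Illustrative sketch, not driver code: one way the per-vector queue list
 * programmed above could be read back -- start at PFINT_LNKLSTN(vector - 1)
 * (vector being the 1-based MSI-X vector, as above) and follow the NEXTQ
 * index in each Tx queue's TQCTL register until the end-of-list marker.
 * The helper name is hypothetical; it only restates the register layout.
 */
static void __maybe_unused i40e_example_dump_vector_list(struct i40e_pf *pf,
							 u16 vector)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
	u32 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
		  >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;

	while (qp != I40E_QUEUE_END_OF_LIST) {
		dev_info(&pf->pdev->dev, "vector %u -> queue pair %u\n",
			 vector, qp);
		val = rd32(hw, I40E_QINT_TQCTL(qp));
		qp = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
		      >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
	}
}
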
3076/**
3077 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3078 * @pf: board private structure
3079 **/
Jacob Kellerab437b52014-12-14 01:55:08 +00003080static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003081{
Jacob Kellerab437b52014-12-14 01:55:08 +00003082 struct i40e_hw *hw = &pf->hw;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003083 u32 val;
3084
3085 /* clear things first */
3086 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3087 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3088
3089 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3090 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3091 I40E_PFINT_ICR0_ENA_GRST_MASK |
3092 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3093 I40E_PFINT_ICR0_ENA_GPIO_MASK |
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003094 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3095 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3096 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3097
Anjali Singhai Jain0d8e1432015-06-05 12:20:32 -04003098 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3099 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3100
Jacob Kellerab437b52014-12-14 01:55:08 +00003101 if (pf->flags & I40E_FLAG_PTP)
3102 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3103
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003104 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3105
3106 /* SW_ITR_IDX = 0, but don't change INTENA */
Anjali Singhai Jain84ed40e2013-11-26 10:49:32 +00003107 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3108 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003109
3110 /* OTHER_ITR_IDX = 0 */
3111 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3112}
3113
3114/**
3115 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3116 * @vsi: the VSI being configured
3117 **/
3118static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3119{
Alexander Duyck493fb302013-09-28 07:01:44 +00003120 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003121 struct i40e_pf *pf = vsi->back;
3122 struct i40e_hw *hw = &pf->hw;
3123 u32 val;
3124
3125 /* set the ITR configuration */
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04003126 q_vector->itr_countdown = ITR_COUNTDOWN_START;
Kan Lianga75e8002016-02-19 09:24:04 -05003127 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003128 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3129 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
Kan Lianga75e8002016-02-19 09:24:04 -05003130 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003131 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3132 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
3133
Jacob Kellerab437b52014-12-14 01:55:08 +00003134 i40e_enable_misc_int_causes(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003135
3136 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3137 wr32(hw, I40E_PFINT_LNKLST0, 0);
3138
Jesse Brandeburgf29eaa32014-02-11 08:24:12 +00003139 /* Associate the queue pair to the vector and enable the queue int */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003140 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3141 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3142 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3143
3144 wr32(hw, I40E_QINT_RQCTL(0), val);
3145
3146 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3147 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3148 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3149
3150 wr32(hw, I40E_QINT_TQCTL(0), val);
3151 i40e_flush(hw);
3152}
3153
3154/**
Mitch Williams2ef28cf2013-11-28 06:39:32 +00003155 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3156 * @pf: board private structure
3157 **/
3158void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3159{
3160 struct i40e_hw *hw = &pf->hw;
3161
3162 wr32(hw, I40E_PFINT_DYN_CTL0,
3163 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3164 i40e_flush(hw);
3165}
3166
3167/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003168 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3169 * @pf: board private structure
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08003170 * @clearpba: true when all pending interrupt events should be cleared
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003171 **/
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08003172void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003173{
3174 struct i40e_hw *hw = &pf->hw;
3175 u32 val;
3176
3177 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08003178 (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003179 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3180
3181 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3182 i40e_flush(hw);
3183}
3184
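/* Usage sketch (illustrative; the helper name is hypothetical): a caller
 * that needs to quiesce the non-queue interrupt can pair the two helpers
 * above -- mask ICR0, do its work, then re-enable with clearpba = true so
 * that any events latched in the meantime fire again.
 */
static void __maybe_unused i40e_example_quiesce_icr0(struct i40e_pf *pf)
{
	i40e_irq_dynamic_disable_icr0(pf);

	/* ... drain admin queue or other misc events here ... */

	i40e_irq_dynamic_enable_icr0(pf, true);
}
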
3185/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003186 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3187 * @irq: interrupt number
3188 * @data: pointer to a q_vector
3189 **/
3190static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3191{
3192 struct i40e_q_vector *q_vector = data;
3193
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003194 if (!q_vector->tx.ring && !q_vector->rx.ring)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003195 return IRQ_HANDLED;
3196
Alexander Duyck5d3465a2015-09-29 15:19:50 -07003197 napi_schedule_irqoff(&q_vector->napi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003198
3199 return IRQ_HANDLED;
3200}
3201
3202/**
Alan Brady96db7762016-09-14 16:24:38 -07003203 * i40e_irq_affinity_notify - Callback for affinity changes
3204 * @notify: the affinity notifier embedded in the q_vector whose irq changed
3205 * @mask: the new affinity mask
3206 *
3207 * This is a callback function used by the irq_set_affinity_notifier function
3208 * so that we may register to receive changes to the irq affinity masks.
3209 **/
3210static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3211 const cpumask_t *mask)
3212{
3213 struct i40e_q_vector *q_vector =
3214 container_of(notify, struct i40e_q_vector, affinity_notify);
3215
3216 q_vector->affinity_mask = *mask;
3217}
3218
3219/**
3220 * i40e_irq_affinity_release - Callback for affinity notifier release
3221 * @ref: internal core kernel usage
3222 *
3223 * This is a callback function used by the irq_set_affinity_notifier function
3224 * to inform the current notification subscriber that they will no longer
3225 * receive notifications.
3226 **/
3227static void i40e_irq_affinity_release(struct kref *ref) {}
3228
3229/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003230 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3231 * @vsi: the VSI being configured
3232 * @basename: name for the vector
3233 *
3234 * Allocates MSI-X vectors and requests interrupts from the kernel.
3235 **/
3236static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3237{
3238 int q_vectors = vsi->num_q_vectors;
3239 struct i40e_pf *pf = vsi->back;
3240 int base = vsi->base_vector;
3241 int rx_int_idx = 0;
3242 int tx_int_idx = 0;
3243 int vector, err;
Alan Brady96db7762016-09-14 16:24:38 -07003244 int irq_num;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003245
3246 for (vector = 0; vector < q_vectors; vector++) {
Alexander Duyck493fb302013-09-28 07:01:44 +00003247 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003248
Alan Brady96db7762016-09-14 16:24:38 -07003249 irq_num = pf->msix_entries[base + vector].vector;
3250
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003251 if (q_vector->tx.ring && q_vector->rx.ring) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003252 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3253 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3254 tx_int_idx++;
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003255 } else if (q_vector->rx.ring) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003256 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3257 "%s-%s-%d", basename, "rx", rx_int_idx++);
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003258 } else if (q_vector->tx.ring) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003259 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3260 "%s-%s-%d", basename, "tx", tx_int_idx++);
3261 } else {
3262 /* skip this unused q_vector */
3263 continue;
3264 }
Alan Brady96db7762016-09-14 16:24:38 -07003265 err = request_irq(irq_num,
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003266 vsi->irq_handler,
3267 0,
3268 q_vector->name,
3269 q_vector);
3270 if (err) {
3271 dev_info(&pf->pdev->dev,
Shannon Nelsonfb43201f2015-08-26 15:14:17 -04003272 "MSIX request_irq failed, error: %d\n", err);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003273 goto free_queue_irqs;
3274 }
Alan Brady96db7762016-09-14 16:24:38 -07003275
3276 /* register for affinity change notifications */
3277 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3278 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3279 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003280 /* assign the mask for this irq */
Alan Brady96db7762016-09-14 16:24:38 -07003281 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003282 }
3283
Shannon Nelson63741842014-04-23 04:50:16 +00003284 vsi->irqs_ready = true;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003285 return 0;
3286
3287free_queue_irqs:
3288 while (vector) {
3289 vector--;
Alan Brady96db7762016-09-14 16:24:38 -07003290 irq_num = pf->msix_entries[base + vector].vector;
3291 irq_set_affinity_notifier(irq_num, NULL);
3292 irq_set_affinity_hint(irq_num, NULL);
3293 free_irq(irq_num, &vsi->q_vectors[vector]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003294 }
3295 return err;
3296}
3297
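/* Illustrative sketch: the names requested above come out as
 * "<basename>-TxRx-0", "<basename>-TxRx-1", ... when a vector serves both
 * a Tx and an Rx ring, or "-rx-N"/"-tx-N" when it serves only one. The
 * basename comes from the caller (typically built around the netdev name),
 * which is an assumption of this example; the hypothetical helper below
 * only restates the snprintf format used above.
 */
static void __maybe_unused i40e_example_vector_name(char *buf, size_t len,
						    const char *basename,
						    int idx)
{
	snprintf(buf, len, "%s-%s-%d", basename, "TxRx", idx);
}
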
3298/**
3299 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3300 * @vsi: the VSI being un-configured
3301 **/
3302static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3303{
3304 struct i40e_pf *pf = vsi->back;
3305 struct i40e_hw *hw = &pf->hw;
3306 int base = vsi->base_vector;
3307 int i;
3308
3309 for (i = 0; i < vsi->num_queue_pairs; i++) {
Alexander Duyck9f65e152013-09-28 06:00:58 +00003310 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
3311 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003312 }
3313
3314 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3315 for (i = vsi->base_vector;
3316 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3317 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3318
3319 i40e_flush(hw);
3320 for (i = 0; i < vsi->num_q_vectors; i++)
3321 synchronize_irq(pf->msix_entries[i + base].vector);
3322 } else {
3323 /* Legacy and MSI mode - this stops all interrupt handling */
3324 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3325 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3326 i40e_flush(hw);
3327 synchronize_irq(pf->pdev->irq);
3328 }
3329}
3330
3331/**
3332 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3333 * @vsi: the VSI being configured
3334 **/
3335static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3336{
3337 struct i40e_pf *pf = vsi->back;
3338 int i;
3339
3340 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
Jesse Brandeburg78455482015-07-23 16:54:41 -04003341 for (i = 0; i < vsi->num_q_vectors; i++)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003342 i40e_irq_dynamic_enable(vsi, i);
3343 } else {
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08003344 i40e_irq_dynamic_enable_icr0(pf, true);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003345 }
3346
Jesse Brandeburg1022cb62013-09-28 07:13:08 +00003347 i40e_flush(&pf->hw);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003348 return 0;
3349}
3350
3351/**
3352 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3353 * @pf: board private structure
3354 **/
3355static void i40e_stop_misc_vector(struct i40e_pf *pf)
3356{
3357 /* Disable ICR 0 */
3358 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3359 i40e_flush(&pf->hw);
3360}
3361
3362/**
3363 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3364 * @irq: interrupt number
3365 * @data: pointer to the PF (board private structure)
3366 *
3367 * This is the handler used for all MSI/Legacy interrupts, and deals
3368 * with both queue and non-queue interrupts. This is also used in
3369 * MSIX mode to handle the non-queue interrupts.
3370 **/
3371static irqreturn_t i40e_intr(int irq, void *data)
3372{
3373 struct i40e_pf *pf = (struct i40e_pf *)data;
3374 struct i40e_hw *hw = &pf->hw;
Anjali Singhai Jain5e823062013-12-18 13:45:49 +00003375 irqreturn_t ret = IRQ_NONE;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003376 u32 icr0, icr0_remaining;
3377 u32 val, ena_mask;
3378
3379 icr0 = rd32(hw, I40E_PFINT_ICR0);
Anjali Singhai Jain5e823062013-12-18 13:45:49 +00003380 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003381
Shannon Nelson116a57d2013-09-28 07:13:59 +00003382 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3383 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
Anjali Singhai Jain5e823062013-12-18 13:45:49 +00003384 goto enable_intr;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003385
Shannon Nelsoncd92e722013-11-16 10:00:44 +00003386 /* if interrupt but no bits showing, must be SWINT */
3387 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3388 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3389 pf->sw_int_count++;
3390
Anjali Singhai Jain0d8e1432015-06-05 12:20:32 -04003391 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3392 (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3393 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3394 icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3395 dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
3396 }
3397
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003398 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3399 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
Alexander Duyck5d3465a2015-09-29 15:19:50 -07003400 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3401 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003402
Anjali Singhai Jaina16ae2d2016-01-15 14:33:16 -08003403 /* We do not have a way to disarm queue causes while leaving
3404 * the interrupt enabled for all other causes; ideally the
3405 * interrupt would be disabled while we are in NAPI, but this
3406 * is not a performance path and napi_schedule() can deal
3407 * with rescheduling.
3408 */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003409 if (!test_bit(__I40E_DOWN, &pf->state))
Alexander Duyck5d3465a2015-09-29 15:19:50 -07003410 napi_schedule_irqoff(&q_vector->napi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003411 }
3412
3413 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3414 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3415 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
Shannon Nelson6e93d0c2016-01-15 14:33:18 -08003416 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003417 }
3418
3419 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3420 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3421 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
3422 }
3423
3424 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3425 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3426 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
3427 }
3428
3429 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3430 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
3431 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
3432 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3433 val = rd32(hw, I40E_GLGEN_RSTAT);
3434 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3435 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
Shannon Nelson4eb3f762014-03-06 08:59:58 +00003436 if (val == I40E_RESET_CORER) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003437 pf->corer_count++;
Shannon Nelson4eb3f762014-03-06 08:59:58 +00003438 } else if (val == I40E_RESET_GLOBR) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003439 pf->globr_count++;
Shannon Nelson4eb3f762014-03-06 08:59:58 +00003440 } else if (val == I40E_RESET_EMPR) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003441 pf->empr_count++;
Anjali Singhai Jain9df42d12015-01-24 09:58:40 +00003442 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
Shannon Nelson4eb3f762014-03-06 08:59:58 +00003443 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003444 }
3445
Anjali Singhai Jain9c010ee2013-11-28 06:39:20 +00003446 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3447 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3448 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
Anjali Singhai Jain25fc0e62015-03-31 00:45:01 -07003449 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3450 rd32(hw, I40E_PFHMC_ERRORINFO),
3451 rd32(hw, I40E_PFHMC_ERRORDATA));
Anjali Singhai Jain9c010ee2013-11-28 06:39:20 +00003452 }
3453
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00003454 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3455 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3456
3457 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
Jacob Kellercafa1fc2014-04-24 18:05:03 -07003458 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00003459 i40e_ptp_tx_hwtstamp(pf);
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00003460 }
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00003461 }
3462
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003463 /* If a critical error is pending we have no choice but to reset the
3464 * device.
3465 * Report and mask out any remaining unexpected interrupts.
3466 */
3467 icr0_remaining = icr0 & ena_mask;
3468 if (icr0_remaining) {
3469 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3470 icr0_remaining);
Anjali Singhai Jain9c010ee2013-11-28 06:39:20 +00003471 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003472 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
Anjali Singhai Jainc0c28972014-02-12 01:45:34 +00003473 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
Anjali Singhai Jain9c010ee2013-11-28 06:39:20 +00003474 dev_info(&pf->pdev->dev, "device will be reset\n");
3475 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
3476 i40e_service_event_schedule(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003477 }
3478 ena_mask &= ~icr0_remaining;
3479 }
Anjali Singhai Jain5e823062013-12-18 13:45:49 +00003480 ret = IRQ_HANDLED;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003481
Anjali Singhai Jain5e823062013-12-18 13:45:49 +00003482enable_intr:
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003483 /* re-enable interrupt causes */
3484 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003485 if (!test_bit(__I40E_DOWN, &pf->state)) {
3486 i40e_service_event_schedule(pf);
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08003487 i40e_irq_dynamic_enable_icr0(pf, false);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003488 }
3489
Anjali Singhai Jain5e823062013-12-18 13:45:49 +00003490 return ret;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003491}
3492
3493/**
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08003494 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3495 * @tx_ring: tx ring to clean
3496 * @budget: how many cleans we're allowed
3497 *
3498 * Returns true if there's any budget left (i.e. the clean is finished)
3499 **/
3500static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3501{
3502 struct i40e_vsi *vsi = tx_ring->vsi;
3503 u16 i = tx_ring->next_to_clean;
3504 struct i40e_tx_buffer *tx_buf;
3505 struct i40e_tx_desc *tx_desc;
3506
3507 tx_buf = &tx_ring->tx_bi[i];
3508 tx_desc = I40E_TX_DESC(tx_ring, i);
3509 i -= tx_ring->count;
3510
3511 do {
3512 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3513
3514 /* if next_to_watch is not set then there is no work pending */
3515 if (!eop_desc)
3516 break;
3517
3518 /* prevent any other reads prior to eop_desc */
3519 read_barrier_depends();
3520
3521 /* if the descriptor isn't done, no work yet to do */
3522 if (!(eop_desc->cmd_type_offset_bsz &
3523 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3524 break;
3525
3526 /* clear next_to_watch to prevent false hangs */
3527 tx_buf->next_to_watch = NULL;
3528
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +00003529 tx_desc->buffer_addr = 0;
3530 tx_desc->cmd_type_offset_bsz = 0;
3531 /* move past filter desc */
3532 tx_buf++;
3533 tx_desc++;
3534 i++;
3535 if (unlikely(!i)) {
3536 i -= tx_ring->count;
3537 tx_buf = tx_ring->tx_bi;
3538 tx_desc = I40E_TX_DESC(tx_ring, 0);
3539 }
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08003540 /* unmap skb header data */
3541 dma_unmap_single(tx_ring->dev,
3542 dma_unmap_addr(tx_buf, dma),
3543 dma_unmap_len(tx_buf, len),
3544 DMA_TO_DEVICE);
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +00003545 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3546 kfree(tx_buf->raw_buf);
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08003547
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +00003548 tx_buf->raw_buf = NULL;
3549 tx_buf->tx_flags = 0;
3550 tx_buf->next_to_watch = NULL;
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08003551 dma_unmap_len_set(tx_buf, len, 0);
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +00003552 tx_desc->buffer_addr = 0;
3553 tx_desc->cmd_type_offset_bsz = 0;
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08003554
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +00003555 /* move us past the eop_desc for start of next FD desc */
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08003556 tx_buf++;
3557 tx_desc++;
3558 i++;
3559 if (unlikely(!i)) {
3560 i -= tx_ring->count;
3561 tx_buf = tx_ring->tx_bi;
3562 tx_desc = I40E_TX_DESC(tx_ring, 0);
3563 }
3564
3565 /* update budget accounting */
3566 budget--;
3567 } while (likely(budget));
3568
3569 i += tx_ring->count;
3570 tx_ring->next_to_clean = i;
3571
Jesse Brandeburg6995b362015-08-28 17:55:54 -04003572 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
Jesse Brandeburg78455482015-07-23 16:54:41 -04003573 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
Jesse Brandeburg6995b362015-08-28 17:55:54 -04003574
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08003575 return budget > 0;
3576}
3577
3578/**
3579 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3580 * @irq: interrupt number
3581 * @data: pointer to a q_vector
3582 **/
3583static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3584{
3585 struct i40e_q_vector *q_vector = data;
3586 struct i40e_vsi *vsi;
3587
3588 if (!q_vector->tx.ring)
3589 return IRQ_HANDLED;
3590
3591 vsi = q_vector->tx.ring->vsi;
3592 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3593
3594 return IRQ_HANDLED;
3595}
3596
3597/**
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003598 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003599 * @vsi: the VSI being configured
3600 * @v_idx: vector index
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003601 * @qp_idx: queue pair index
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003602 **/
Anjali Singhai Jain26cdc442015-07-10 19:36:00 -04003603static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003604{
Alexander Duyck493fb302013-09-28 07:01:44 +00003605 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
Alexander Duyck9f65e152013-09-28 06:00:58 +00003606 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3607 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003608
3609 tx_ring->q_vector = q_vector;
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003610 tx_ring->next = q_vector->tx.ring;
3611 q_vector->tx.ring = tx_ring;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003612 q_vector->tx.count++;
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003613
3614 rx_ring->q_vector = q_vector;
3615 rx_ring->next = q_vector->rx.ring;
3616 q_vector->rx.ring = rx_ring;
3617 q_vector->rx.count++;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003618}
3619
3620/**
3621 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3622 * @vsi: the VSI being configured
3623 *
3624 * This function maps descriptor rings to the queue-specific vectors
3625 * we were allotted through the MSI-X enabling code. Ideally, we'd have
3626 * one vector per queue pair, but on a constrained vector budget, we
3627 * group the queue pairs as "efficiently" as possible.
3628 **/
3629static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3630{
3631 int qp_remaining = vsi->num_queue_pairs;
3632 int q_vectors = vsi->num_q_vectors;
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003633 int num_ringpairs;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003634 int v_start = 0;
3635 int qp_idx = 0;
3636
3637 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3638 * group them so there are multiple queues per vector.
Anjali Singhai Jain70114ec2014-06-03 23:50:14 +00003639 * It is also important to go through all the vectors available to be
3640 * sure that, if we don't use all the vectors, the remaining vectors
3641 * are cleared. This is especially important when decreasing the
3642 * number of queues in use.
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003643 */
Anjali Singhai Jain70114ec2014-06-03 23:50:14 +00003644 for (; v_start < q_vectors; v_start++) {
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003645 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3646
3647 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3648
3649 q_vector->num_ringpairs = num_ringpairs;
3650
3651 q_vector->rx.count = 0;
3652 q_vector->tx.count = 0;
3653 q_vector->rx.ring = NULL;
3654 q_vector->tx.ring = NULL;
3655
3656 while (num_ringpairs--) {
Anjali Singhai Jain26cdc442015-07-10 19:36:00 -04003657 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00003658 qp_idx++;
3659 qp_remaining--;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003660 }
3661 }
3662}
3663
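/* Worked sketch (illustrative only): how the loop above spreads queue
 * pairs over a constrained vector budget. With 10 queue pairs and 4
 * vectors the per-vector counts come out 3, 3, 2, 2 -- at each step the
 * remaining pairs are divided by the remaining vectors, rounded up.
 * The helper name is hypothetical.
 */
static void __maybe_unused i40e_example_ringpair_split(int num_qps,
							int num_vectors)
{
	int qp_remaining = num_qps;
	int v;

	for (v = 0; v < num_vectors; v++) {
		int n = DIV_ROUND_UP(qp_remaining, num_vectors - v);

		pr_debug("vector %d gets %d queue pairs\n", v, n);
		qp_remaining -= n;
	}
}
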
3664/**
3665 * i40e_vsi_request_irq - Request IRQ from the OS
3666 * @vsi: the VSI being configured
3667 * @basename: name for the vector
3668 **/
3669static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3670{
3671 struct i40e_pf *pf = vsi->back;
3672 int err;
3673
3674 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3675 err = i40e_vsi_request_irq_msix(vsi, basename);
3676 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3677 err = request_irq(pf->pdev->irq, i40e_intr, 0,
Carolyn Wybornyb294ac72014-12-11 07:06:39 +00003678 pf->int_name, pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003679 else
3680 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
Carolyn Wybornyb294ac72014-12-11 07:06:39 +00003681 pf->int_name, pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003682
3683 if (err)
3684 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3685
3686 return err;
3687}
3688
3689#ifdef CONFIG_NET_POLL_CONTROLLER
3690/**
Jesse Brandeburgd89d9672016-01-04 10:33:02 -08003691 * i40e_netpoll - A Polling 'interrupt' handler
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003692 * @netdev: network interface device structure
3693 *
3694 * This is used by netconsole to send skbs without having to re-enable
3695 * interrupts. It's not called while the normal interrupt routine is executing.
3696 **/
Vasu Dev38e00432014-08-01 13:27:03 -07003697#ifdef I40E_FCOE
3698void i40e_netpoll(struct net_device *netdev)
3699#else
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003700static void i40e_netpoll(struct net_device *netdev)
Vasu Dev38e00432014-08-01 13:27:03 -07003701#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003702{
3703 struct i40e_netdev_priv *np = netdev_priv(netdev);
3704 struct i40e_vsi *vsi = np->vsi;
3705 struct i40e_pf *pf = vsi->back;
3706 int i;
3707
3708 /* if interface is down do nothing */
3709 if (test_bit(__I40E_DOWN, &vsi->state))
3710 return;
3711
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003712 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3713 for (i = 0; i < vsi->num_q_vectors; i++)
Alexander Duyck493fb302013-09-28 07:01:44 +00003714 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003715 } else {
3716 i40e_intr(pf->pdev->irq, netdev);
3717 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003718}
3719#endif
3720
3721/**
Neerav Parikh23527302014-06-03 23:50:15 +00003722 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3723 * @pf: the PF being configured
3724 * @pf_q: the PF queue
3725 * @enable: enable or disable state of the queue
3726 *
3727 * This routine will wait for the given Tx queue of the PF to reach the
3728 * enabled or disabled state.
3729 * Returns -ETIMEDOUT in case of failing to reach the requested state after
3730 * multiple retries; else will return 0 in case of success.
3731 **/
3732static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3733{
3734 int i;
3735 u32 tx_reg;
3736
3737 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3738 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3739 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3740 break;
3741
Neerav Parikhf98a2002014-09-13 07:40:44 +00003742 usleep_range(10, 20);
Neerav Parikh23527302014-06-03 23:50:15 +00003743 }
3744 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3745 return -ETIMEDOUT;
3746
3747 return 0;
3748}
3749
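/* Illustrative sketch, not driver code: a minimal "disable one Tx queue
 * and wait for the hardware to acknowledge" sequence built from the wait
 * helper above. The full driver path (i40e_vsi_control_tx below) also
 * issues the pre-configuration warning and retries the enable bits; this
 * sketch omits those steps, and its helper name is hypothetical.
 */
static int __maybe_unused i40e_example_stop_tx_queue(struct i40e_pf *pf,
						     int pf_q)
{
	struct i40e_hw *hw = &pf->hw;
	u32 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));

	/* drop the enable request, then poll QENA_STAT until it clears */
	tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);

	return i40e_pf_txq_wait(pf, pf_q, false);
}
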
3750/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003751 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
3752 * @vsi: the VSI being configured
3753 * @enable: start or stop the rings
3754 **/
3755static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3756{
3757 struct i40e_pf *pf = vsi->back;
3758 struct i40e_hw *hw = &pf->hw;
Neerav Parikh23527302014-06-03 23:50:15 +00003759 int i, j, pf_q, ret = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003760 u32 tx_reg;
3761
3762 pf_q = vsi->base_queue;
3763 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
Matt Jared351499ab2014-04-23 04:50:03 +00003764
3765 /* warn the TX unit of coming changes */
3766 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3767 if (!enable)
Neerav Parikhf98a2002014-09-13 07:40:44 +00003768 usleep_range(10, 20);
Matt Jared351499ab2014-04-23 04:50:03 +00003769
Mitch Williams6c5ef622014-02-20 19:29:16 -08003770 for (j = 0; j < 50; j++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003771 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
Mitch Williams6c5ef622014-02-20 19:29:16 -08003772 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3773 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3774 break;
3775 usleep_range(1000, 2000);
3776 }
Mitch Williamsfda972f2013-11-28 06:39:29 +00003777 /* Skip if the queue is already in the requested state */
Catherine Sullivan7c122002014-03-14 07:32:29 +00003778 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
Mitch Williamsfda972f2013-11-28 06:39:29 +00003779 continue;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003780
3781 /* turn on/off the queue */
Shannon Nelsonc5c9eb92013-12-21 05:44:48 +00003782 if (enable) {
3783 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
Mitch Williams6c5ef622014-02-20 19:29:16 -08003784 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
Shannon Nelsonc5c9eb92013-12-21 05:44:48 +00003785 } else {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003786 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
Shannon Nelsonc5c9eb92013-12-21 05:44:48 +00003787 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003788
3789 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
Neerav Parikh69129dc2014-11-12 00:18:46 +00003790 /* No waiting for the Tx queue to disable */
3791 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3792 continue;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003793
3794 /* wait for the change to finish */
Neerav Parikh23527302014-06-03 23:50:15 +00003795 ret = i40e_pf_txq_wait(pf, pf_q, enable);
3796 if (ret) {
3797 dev_info(&pf->pdev->dev,
Shannon Nelsonfb43201f2015-08-26 15:14:17 -04003798 "VSI seid %d Tx ring %d %sable timeout\n",
3799 vsi->seid, pf_q, (enable ? "en" : "dis"));
Neerav Parikh23527302014-06-03 23:50:15 +00003800 break;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003801 }
3802 }
3803
Jesse Brandeburg7134f9c2013-11-26 08:56:05 +00003804 if (hw->revision_id == 0)
3805 mdelay(50);
Neerav Parikh23527302014-06-03 23:50:15 +00003806 return ret;
3807}
3808
3809/**
3810 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3811 * @pf: the PF being configured
3812 * @pf_q: the PF queue
3813 * @enable: enable or disable state of the queue
3814 *
3815 * This routine will wait for the given Rx queue of the PF to reach the
3816 * enabled or disabled state.
3817 * Returns -ETIMEDOUT in case of failing to reach the requested state after
3818 * multiple retries; else will return 0 in case of success.
3819 **/
3820static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3821{
3822 int i;
3823 u32 rx_reg;
3824
3825 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3826 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3827 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3828 break;
3829
Neerav Parikhf98a2002014-09-13 07:40:44 +00003830 usleep_range(10, 20);
Neerav Parikh23527302014-06-03 23:50:15 +00003831 }
3832 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3833 return -ETIMEDOUT;
Jesse Brandeburg7134f9c2013-11-26 08:56:05 +00003834
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003835 return 0;
3836}
3837
3838/**
3839 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
3840 * @vsi: the VSI being configured
3841 * @enable: start or stop the rings
3842 **/
3843static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3844{
3845 struct i40e_pf *pf = vsi->back;
3846 struct i40e_hw *hw = &pf->hw;
Neerav Parikh23527302014-06-03 23:50:15 +00003847 int i, j, pf_q, ret = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003848 u32 rx_reg;
3849
3850 pf_q = vsi->base_queue;
3851 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
Mitch Williams6c5ef622014-02-20 19:29:16 -08003852 for (j = 0; j < 50; j++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003853 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
Mitch Williams6c5ef622014-02-20 19:29:16 -08003854 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3855 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3856 break;
3857 usleep_range(1000, 2000);
3858 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003859
Catherine Sullivan7c122002014-03-14 07:32:29 +00003860 /* Skip if the queue is already in the requested state */
3861 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3862 continue;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003863
3864 /* turn on/off the queue */
3865 if (enable)
Mitch Williams6c5ef622014-02-20 19:29:16 -08003866 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003867 else
Mitch Williams6c5ef622014-02-20 19:29:16 -08003868 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003869 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
Neerav Parikh3fe06f42016-02-17 16:12:15 -08003870 /* No waiting for the Rx queue to disable */
3871 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3872 continue;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003873
3874 /* wait for the change to finish */
Neerav Parikh23527302014-06-03 23:50:15 +00003875 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3876 if (ret) {
3877 dev_info(&pf->pdev->dev,
Shannon Nelsonfb43201f2015-08-26 15:14:17 -04003878 "VSI seid %d Rx ring %d %sable timeout\n",
3879 vsi->seid, pf_q, (enable ? "en" : "dis"));
Neerav Parikh23527302014-06-03 23:50:15 +00003880 break;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003881 }
3882 }
3883
Neerav Parikh23527302014-06-03 23:50:15 +00003884 return ret;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003885}
3886
3887/**
3888 * i40e_vsi_control_rings - Start or stop a VSI's rings
3889 * @vsi: the VSI being configured
3890 * @request: true to start the rings, false to stop them
3891 **/
Mitch Williamsfc18eaa2013-11-28 06:39:27 +00003892int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003893{
Anjali Singhai Jain3b867b22013-12-21 05:44:44 +00003894 int ret = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003895
3896 /* do rx first for enable and last for disable */
3897 if (request) {
3898 ret = i40e_vsi_control_rx(vsi, request);
3899 if (ret)
3900 return ret;
3901 ret = i40e_vsi_control_tx(vsi, request);
3902 } else {
Anjali Singhai Jain3b867b22013-12-21 05:44:44 +00003903 /* Ignore return value, we need to shutdown whatever we can */
3904 i40e_vsi_control_tx(vsi, request);
3905 i40e_vsi_control_rx(vsi, request);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003906 }
3907
3908 return ret;
3909}
3910
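/* Usage sketch (illustrative; helper name and error handling assumed):
 * ring control is symmetric around interface up/down -- request true
 * before traffic starts and false on teardown, and surface any timeout
 * reported by the per-queue wait helpers.
 */
static int __maybe_unused i40e_example_rings_up_down(struct i40e_vsi *vsi,
						     bool up)
{
	int ret = i40e_vsi_control_rings(vsi, up);

	if (ret)
		dev_info(&vsi->back->pdev->dev,
			 "VSI seid %d ring %sable failed: %d\n",
			 vsi->seid, up ? "en" : "dis", ret);
	return ret;
}
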
3911/**
3912 * i40e_vsi_free_irq - Free the irq association with the OS
3913 * @vsi: the VSI being configured
3914 **/
3915static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3916{
3917 struct i40e_pf *pf = vsi->back;
3918 struct i40e_hw *hw = &pf->hw;
3919 int base = vsi->base_vector;
3920 u32 val, qp;
3921 int i;
3922
3923 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3924 if (!vsi->q_vectors)
3925 return;
3926
Shannon Nelson63741842014-04-23 04:50:16 +00003927 if (!vsi->irqs_ready)
3928 return;
3929
3930 vsi->irqs_ready = false;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003931 for (i = 0; i < vsi->num_q_vectors; i++) {
Alan Brady96db7762016-09-14 16:24:38 -07003932 int irq_num;
3933 u16 vector;
3934
3935 vector = i + base;
3936 irq_num = pf->msix_entries[vector].vector;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003937
3938 /* free only the irqs that were actually requested */
Shannon Nelson78681b12013-11-28 06:39:36 +00003939 if (!vsi->q_vectors[i] ||
3940 !vsi->q_vectors[i]->num_ringpairs)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003941 continue;
3942
Alan Brady96db7762016-09-14 16:24:38 -07003943 /* clear the affinity notifier in the IRQ descriptor */
3944 irq_set_affinity_notifier(irq_num, NULL);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003945 /* clear the affinity_mask in the IRQ descriptor */
Alan Brady96db7762016-09-14 16:24:38 -07003946 irq_set_affinity_hint(irq_num, NULL);
3947 synchronize_irq(irq_num);
3948 free_irq(irq_num, vsi->q_vectors[i]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00003949
3950 /* Tear down the interrupt queue link list
3951 *
3952 * We know that they come in pairs and always
3953 * the Rx first, then the Tx. To clear the
3954 * link list, stick the EOL value into the
3955 * next_q field of the registers.
3956 */
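			/* Illustrative sketch (inferred from the register usage below,
			 * not a statement from the datasheet): the chain looks like
			 *   PFINT_LNKLSTN(v) -> Rx[qp] -> Tx[qp] -> Rx[qp'] -> Tx[qp'] -> ... -> EOL
			 * and the loop below walks it via the NEXTQ_INDX fields,
			 * unhooking each element as it goes.
			 */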
3957 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3958 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3959 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3960 val |= I40E_QUEUE_END_OF_LIST
3961 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3962 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3963
3964 while (qp != I40E_QUEUE_END_OF_LIST) {
3965 u32 next;
3966
3967 val = rd32(hw, I40E_QINT_RQCTL(qp));
3968
3969 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3970 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3971 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3972 I40E_QINT_RQCTL_INTEVENT_MASK);
3973
3974 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3975 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3976
3977 wr32(hw, I40E_QINT_RQCTL(qp), val);
3978
3979 val = rd32(hw, I40E_QINT_TQCTL(qp));
3980
3981 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3982 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3983
3984 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3985 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3986 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3987 I40E_QINT_TQCTL_INTEVENT_MASK);
3988
3989 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3990 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3991
3992 wr32(hw, I40E_QINT_TQCTL(qp), val);
3993 qp = next;
3994 }
3995 }
3996 } else {
3997 free_irq(pf->pdev->irq, pf);
3998
3999 val = rd32(hw, I40E_PFINT_LNKLST0);
4000 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4001 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4002 val |= I40E_QUEUE_END_OF_LIST
4003 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4004 wr32(hw, I40E_PFINT_LNKLST0, val);
4005
4006 val = rd32(hw, I40E_QINT_RQCTL(qp));
4007 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4008 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4009 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4010 I40E_QINT_RQCTL_INTEVENT_MASK);
4011
4012 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4013 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4014
4015 wr32(hw, I40E_QINT_RQCTL(qp), val);
4016
4017 val = rd32(hw, I40E_QINT_TQCTL(qp));
4018
4019 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4020 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4021 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4022 I40E_QINT_TQCTL_INTEVENT_MASK);
4023
4024 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4025 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4026
4027 wr32(hw, I40E_QINT_TQCTL(qp), val);
4028 }
4029}
4030
4031/**
Alexander Duyck493fb302013-09-28 07:01:44 +00004032 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4033 * @vsi: the VSI being configured
4034 * @v_idx: Index of vector to be freed
4035 *
4036 * This function frees the memory allocated to the q_vector. In addition if
4037 * NAPI is enabled it will delete any references to the NAPI struct prior
4038 * to freeing the q_vector.
4039 **/
4040static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4041{
4042 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00004043 struct i40e_ring *ring;
Alexander Duyck493fb302013-09-28 07:01:44 +00004044
4045 if (!q_vector)
4046 return;
4047
4048 /* disassociate q_vector from rings */
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00004049 i40e_for_each_ring(ring, q_vector->tx)
4050 ring->q_vector = NULL;
4051
4052 i40e_for_each_ring(ring, q_vector->rx)
4053 ring->q_vector = NULL;
Alexander Duyck493fb302013-09-28 07:01:44 +00004054
4055 /* only VSI w/ an associated netdev is set up w/ NAPI */
4056 if (vsi->netdev)
4057 netif_napi_del(&q_vector->napi);
4058
4059 vsi->q_vectors[v_idx] = NULL;
4060
4061 kfree_rcu(q_vector, rcu);
4062}
4063
4064/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004065 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4066 * @vsi: the VSI being un-configured
4067 *
4068 * This frees the memory allocated to the q_vectors and
4069 * deletes references to the NAPI struct.
4070 **/
4071static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4072{
4073 int v_idx;
4074
Alexander Duyck493fb302013-09-28 07:01:44 +00004075 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4076 i40e_free_q_vector(vsi, v_idx);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004077}
4078
4079/**
4080 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4081 * @pf: board private structure
4082 **/
4083static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4084{
4085 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4086 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4087 pci_disable_msix(pf->pdev);
4088 kfree(pf->msix_entries);
4089 pf->msix_entries = NULL;
Shannon Nelson3b444392015-02-26 16:15:57 +00004090 kfree(pf->irq_pile);
4091 pf->irq_pile = NULL;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004092 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4093 pci_disable_msi(pf->pdev);
4094 }
4095 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4096}
4097
4098/**
4099 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4100 * @pf: board private structure
4101 *
4102 * We go through and clear interrupt specific resources and reset the structure
4103 * to pre-load conditions
4104 **/
4105static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4106{
4107 int i;
4108
Shannon Nelsone1477582015-02-21 06:44:33 +00004109 i40e_stop_misc_vector(pf);
Shannon Nelson69278392016-03-10 14:59:43 -08004110 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
Shannon Nelsone1477582015-02-21 06:44:33 +00004111 synchronize_irq(pf->msix_entries[0].vector);
4112 free_irq(pf->msix_entries[0].vector, pf);
4113 }
4114
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06004115 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4116 I40E_IWARP_IRQ_PILE_ID);
4117
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004118 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
Mitch Williams505682c2014-05-20 08:01:37 +00004119 for (i = 0; i < pf->num_alloc_vsi; i++)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004120 if (pf->vsi[i])
4121 i40e_vsi_free_q_vectors(pf->vsi[i]);
4122 i40e_reset_interrupt_capability(pf);
4123}
4124
4125/**
4126 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4127 * @vsi: the VSI being configured
4128 **/
4129static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4130{
4131 int q_idx;
4132
4133 if (!vsi->netdev)
4134 return;
4135
4136 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
Alexander Duyck493fb302013-09-28 07:01:44 +00004137 napi_enable(&vsi->q_vectors[q_idx]->napi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004138}
4139
4140/**
4141 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4142 * @vsi: the VSI being configured
4143 **/
4144static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4145{
4146 int q_idx;
4147
4148 if (!vsi->netdev)
4149 return;
4150
4151 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
Alexander Duyck493fb302013-09-28 07:01:44 +00004152 napi_disable(&vsi->q_vectors[q_idx]->napi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004153}
4154
4155/**
Shannon Nelson90ef8d42014-03-14 07:32:26 +00004156 * i40e_vsi_close - Shut down a VSI
4157 * @vsi: the vsi to be quelled
4158 **/
4159static void i40e_vsi_close(struct i40e_vsi *vsi)
4160{
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06004161 bool reset = false;
4162
Shannon Nelson90ef8d42014-03-14 07:32:26 +00004163 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
4164 i40e_down(vsi);
4165 i40e_vsi_free_irq(vsi);
4166 i40e_vsi_free_tx_resources(vsi);
4167 i40e_vsi_free_rx_resources(vsi);
Anjali Singhai Jain92faef82015-07-28 13:02:00 -04004168 vsi->current_netdev_flags = 0;
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06004169 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4170 reset = true;
4171 i40e_notify_client_of_netdev_close(vsi, reset);
Shannon Nelson90ef8d42014-03-14 07:32:26 +00004172}
4173
4174/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004175 * i40e_quiesce_vsi - Pause a given VSI
4176 * @vsi: the VSI being paused
4177 **/
4178static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4179{
4180 if (test_bit(__I40E_DOWN, &vsi->state))
4181 return;
4182
Neerav Parikhd341b7a2014-11-12 00:18:51 +00004183 /* No need to disable FCoE VSI when Tx suspended */
4184 if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
4185 vsi->type == I40E_VSI_FCOE) {
4186 dev_dbg(&vsi->back->pdev->dev,
Shannon Nelsonfb43201f2015-08-26 15:14:17 -04004187 "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
Neerav Parikhd341b7a2014-11-12 00:18:51 +00004188 return;
4189 }
4190
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004191 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
Jesse Brandeburg6995b362015-08-28 17:55:54 -04004192 if (vsi->netdev && netif_running(vsi->netdev))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004193 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
Jesse Brandeburg6995b362015-08-28 17:55:54 -04004194 else
Shannon Nelson90ef8d42014-03-14 07:32:26 +00004195 i40e_vsi_close(vsi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004196}
4197
4198/**
4199 * i40e_unquiesce_vsi - Resume a given VSI
4200 * @vsi: the VSI being resumed
4201 **/
4202static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4203{
4204 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
4205 return;
4206
4207 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4208 if (vsi->netdev && netif_running(vsi->netdev))
4209 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4210 else
Shannon Nelson8276f752014-03-14 07:32:27 +00004211 i40e_vsi_open(vsi); /* this clears the DOWN bit */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004212}
4213
4214/**
4215 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4216 * @pf: the PF
4217 **/
4218static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4219{
4220 int v;
4221
Mitch Williams505682c2014-05-20 08:01:37 +00004222 for (v = 0; v < pf->num_alloc_vsi; v++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004223 if (pf->vsi[v])
4224 i40e_quiesce_vsi(pf->vsi[v]);
4225 }
4226}
4227
4228/**
4229 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4230 * @pf: the PF
4231 **/
4232static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4233{
4234 int v;
4235
Mitch Williams505682c2014-05-20 08:01:37 +00004236 for (v = 0; v < pf->num_alloc_vsi; v++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004237 if (pf->vsi[v])
4238 i40e_unquiesce_vsi(pf->vsi[v]);
4239 }
4240}
4241
Neerav Parikh69129dc2014-11-12 00:18:46 +00004242#ifdef CONFIG_I40E_DCB
4243/**
Neerav Parikh3fe06f42016-02-17 16:12:15 -08004244 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
Neerav Parikh69129dc2014-11-12 00:18:46 +00004245 * @vsi: the VSI being configured
4246 *
Neerav Parikh3fe06f42016-02-17 16:12:15 -08004247 * This function waits for the given VSI's queues to be disabled.
Neerav Parikh69129dc2014-11-12 00:18:46 +00004248 **/
Neerav Parikh3fe06f42016-02-17 16:12:15 -08004249static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
Neerav Parikh69129dc2014-11-12 00:18:46 +00004250{
4251 struct i40e_pf *pf = vsi->back;
4252 int i, pf_q, ret;
4253
4254 pf_q = vsi->base_queue;
4255 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4256 /* Check and wait for the disable status of the queue */
4257 ret = i40e_pf_txq_wait(pf, pf_q, false);
4258 if (ret) {
4259 dev_info(&pf->pdev->dev,
Shannon Nelsonfb43201f2015-08-26 15:14:17 -04004260 "VSI seid %d Tx ring %d disable timeout\n",
4261 vsi->seid, pf_q);
Neerav Parikh69129dc2014-11-12 00:18:46 +00004262 return ret;
4263 }
4264 }
4265
Neerav Parikh3fe06f42016-02-17 16:12:15 -08004266 pf_q = vsi->base_queue;
4267 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4268 /* Check and wait for the disable status of the queue */
4269 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4270 if (ret) {
4271 dev_info(&pf->pdev->dev,
4272 "VSI seid %d Rx ring %d disable timeout\n",
4273 vsi->seid, pf_q);
4274 return ret;
4275 }
4276 }
4277
Neerav Parikh69129dc2014-11-12 00:18:46 +00004278 return 0;
4279}
4280
4281/**
Neerav Parikh3fe06f42016-02-17 16:12:15 -08004282 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
Neerav Parikh69129dc2014-11-12 00:18:46 +00004283 * @pf: the PF
4284 *
Neerav Parikh3fe06f42016-02-17 16:12:15 -08004285 * This function waits for the queues to be in disabled state for all the
Neerav Parikh69129dc2014-11-12 00:18:46 +00004286 * VSIs that are managed by this PF.
4287 **/
Neerav Parikh3fe06f42016-02-17 16:12:15 -08004288static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
Neerav Parikh69129dc2014-11-12 00:18:46 +00004289{
4290 int v, ret = 0;
4291
4292 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
Neerav Parikhd341b7a2014-11-12 00:18:51 +00004293 /* No need to wait for FCoE VSI queues */
4294 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
Neerav Parikh3fe06f42016-02-17 16:12:15 -08004295 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
Neerav Parikh69129dc2014-11-12 00:18:46 +00004296 if (ret)
4297 break;
4298 }
4299 }
4300
4301 return ret;
4302}
4303
4304#endif
Kiran Patilb03a8c12015-09-24 18:13:15 -04004305
4306/**
4307 * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
4308 * @q_idx: TX queue number
4309 * @vsi: Pointer to VSI struct
4310 *
 4311 * This function checks the specified queue of the given VSI and detects a
 4312 * hung condition. Detection is a two-step process: this run sets the 'hung'
 4313 * bit. If napi_poll runs before the next run of the service task, it clears
 4314 * the bit for the respective q_vector. If not, the hung condition remains and,
 4315 * on the subsequent run, this function issues a SW interrupt to recover.
4316 **/
4317static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
4318{
4319 struct i40e_ring *tx_ring = NULL;
4320 struct i40e_pf *pf;
Anjali Singhai Jaindd353102016-01-15 14:33:12 -08004321 u32 head, val, tx_pending_hw;
Kiran Patilb03a8c12015-09-24 18:13:15 -04004322 int i;
4323
4324 pf = vsi->back;
4325
4326 /* now that we have an index, find the tx_ring struct */
4327 for (i = 0; i < vsi->num_queue_pairs; i++) {
4328 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
4329 if (q_idx == vsi->tx_rings[i]->queue_index) {
4330 tx_ring = vsi->tx_rings[i];
4331 break;
4332 }
4333 }
4334 }
4335
4336 if (!tx_ring)
4337 return;
4338
4339 /* Read interrupt register */
4340 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4341 val = rd32(&pf->hw,
4342 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
4343 tx_ring->vsi->base_vector - 1));
4344 else
4345 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
4346
4347 head = i40e_get_head(tx_ring);
4348
Anjali Singhai Jaindd353102016-01-15 14:33:12 -08004349 tx_pending_hw = i40e_get_tx_pending(tx_ring, false);
Kiran Patilb03a8c12015-09-24 18:13:15 -04004350
Kiran Patil9c6c1252015-11-06 15:26:02 -08004351 /* HW is done executing descriptors and has updated the HEAD write
 4352 * back, but SW hasn't processed those descriptors yet. If no
 4353 * interrupt is generated from this point on, dev_watchdog could
 4354 * detect a timeout on those netdev_queues, so proactively trigger
 4355 * a SW interrupt here.
Kiran Patilb03a8c12015-09-24 18:13:15 -04004356 */
Anjali Singhai Jaindd353102016-01-15 14:33:12 -08004357 if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
Kiran Patil9c6c1252015-11-06 15:26:02 -08004358 /* NAPI Poll didn't run and clear since it was set */
4359 if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
4360 &tx_ring->q_vector->hung_detected)) {
Anjali Singhai Jaindd353102016-01-15 14:33:12 -08004361 netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
4362 vsi->seid, q_idx, tx_pending_hw,
Kiran Patil9c6c1252015-11-06 15:26:02 -08004363 tx_ring->next_to_clean, head,
4364 tx_ring->next_to_use,
4365 readl(tx_ring->tail));
4366 netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
4367 vsi->seid, q_idx, val);
4368 i40e_force_wb(vsi, tx_ring->q_vector);
4369 } else {
4370 /* First Chance - detected possible hung */
4371 set_bit(I40E_Q_VECTOR_HUNG_DETECT,
4372 &tx_ring->q_vector->hung_detected);
4373 }
4374 }
Anjali Singhai Jaindd353102016-01-15 14:33:12 -08004375
4376 /* This is the case where we have interrupts missing,
4377 * so the tx_pending in HW will most likely be 0, but we
4378 * will have tx_pending in SW since the WB happened but the
4379 * interrupt got lost.
4380 */
4381 if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
4382 (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
4383 if (napi_reschedule(&tx_ring->q_vector->napi))
4384 tx_ring->tx_stats.tx_lost_interrupt++;
4385 }
Kiran Patilb03a8c12015-09-24 18:13:15 -04004386}
4387
4388/**
4389 * i40e_detect_recover_hung - Function to detect and recover hung_queues
4390 * @pf: pointer to PF struct
4391 *
 4392 * The LAN VSI has a netdev and the netdev has TX queues. This function checks
 4393 * each of those TX queues for a hung condition and triggers recovery by
 4394 * issuing a SW interrupt.
4395 **/
4396static void i40e_detect_recover_hung(struct i40e_pf *pf)
4397{
4398 struct net_device *netdev;
4399 struct i40e_vsi *vsi;
4400 int i;
4401
4402 /* Only for LAN VSI */
4403 vsi = pf->vsi[pf->lan_vsi];
4404
4405 if (!vsi)
4406 return;
4407
4408 /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
4409 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
4410 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4411 return;
4412
4413 /* Make sure type is MAIN VSI */
4414 if (vsi->type != I40E_VSI_MAIN)
4415 return;
4416
4417 netdev = vsi->netdev;
4418 if (!netdev)
4419 return;
4420
4421 /* Bail out if netif_carrier is not OK */
4422 if (!netif_carrier_ok(netdev))
4423 return;
4424
4425 /* Go thru' TX queues for netdev */
4426 for (i = 0; i < netdev->num_tx_queues; i++) {
4427 struct netdev_queue *q;
4428
4429 q = netdev_get_tx_queue(netdev, i);
4430 if (q)
4431 i40e_detect_recover_hung_queue(i, vsi);
4432 }
4433}
4434
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004435/**
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004436 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
Jeff Kirsherb40c82e62015-02-27 09:18:34 +00004437 * @pf: pointer to PF
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004438 *
 4439 * Get TC map for iSCSI PF type that will include iSCSI TC
4440 * and LAN TC.
4441 **/
4442static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4443{
4444 struct i40e_dcb_app_priority_table app;
4445 struct i40e_hw *hw = &pf->hw;
4446 u8 enabled_tc = 1; /* TC0 is always enabled */
4447 u8 tc, i;
4448 /* Get the iSCSI APP TLV */
4449 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4450
4451 for (i = 0; i < dcbcfg->numapps; i++) {
4452 app = dcbcfg->app[i];
4453 if (app.selector == I40E_APP_SEL_TCPIP &&
4454 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4455 tc = dcbcfg->etscfg.prioritytable[app.priority];
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08004456 enabled_tc |= BIT(tc);
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004457 break;
4458 }
4459 }
4460
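	/* Illustrative example (not from the original source): if the iSCSI
	 * APP TLV maps its priority to TC1, enabled_tc becomes 0x3, i.e.
	 * TC0 (always enabled) plus TC1.
	 */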
4461 return enabled_tc;
4462}
4463
4464/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004465 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4466 * @dcbcfg: the corresponding DCBx configuration structure
4467 *
4468 * Return the number of TCs from given DCBx configuration
4469 **/
4470static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4471{
Dave Ertmanfbfe12c2016-08-12 09:56:32 -07004472 int i, tc_unused = 0;
Jesse Brandeburg078b5872013-09-25 23:41:14 +00004473 u8 num_tc = 0;
Dave Ertmanfbfe12c2016-08-12 09:56:32 -07004474 u8 ret = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004475
4476 /* Scan the ETS Config Priority Table to find
4477 * traffic class enabled for a given priority
Dave Ertmanfbfe12c2016-08-12 09:56:32 -07004478 * and create a bitmask of enabled TCs
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004479 */
Dave Ertmanfbfe12c2016-08-12 09:56:32 -07004480 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
4481 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
4482
4483 /* Now scan the bitmask to check for
4484 * contiguous TCs starting with TC0
4485 */
4486 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4487 if (num_tc & BIT(i)) {
4488 if (!tc_unused) {
4489 ret++;
4490 } else {
4491 pr_err("Non-contiguous TC - Disabling DCB\n");
4492 return 1;
4493 }
4494 } else {
4495 tc_unused = 1;
4496 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004497 }
4498
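	/* Worked example (illustrative only): a priority table of
	 * {0, 0, 1, 1, 2, 0, 0, 0} produces the bitmask 0x7 and ret = 3,
	 * whereas a table touching only TC0 and TC2 (bitmask 0x5) is
	 * non-contiguous and the function falls back to a single TC.
	 */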
Dave Ertmanfbfe12c2016-08-12 09:56:32 -07004499 /* There is always at least TC0 */
4500 if (!ret)
4501 ret = 1;
4502
4503 return ret;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004504}
4505
4506/**
4507 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4508 * @dcbcfg: the corresponding DCBx configuration structure
4509 *
4510 * Query the current DCB configuration and return the number of
4511 * traffic classes enabled from the given DCBX config
4512 **/
4513static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4514{
4515 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4516 u8 enabled_tc = 1;
4517 u8 i;
4518
4519 for (i = 0; i < num_tc; i++)
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04004520 enabled_tc |= BIT(i);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004521
4522 return enabled_tc;
4523}
4524
4525/**
4526 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4527 * @pf: PF being queried
4528 *
4529 * Return number of traffic classes enabled for the given PF
4530 **/
4531static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4532{
4533 struct i40e_hw *hw = &pf->hw;
Dave Ertman52a08ca2016-07-27 12:02:34 -07004534 u8 i, enabled_tc = 1;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004535 u8 num_tc = 0;
4536 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4537
4538 /* If DCB is not enabled then always in single TC */
4539 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4540 return 1;
4541
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004542 /* SFP mode will be enabled for all TCs on port */
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004543 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4544 return i40e_dcb_get_num_tc(dcbcfg);
4545
4546 /* MFP mode return count of enabled TCs for this PF */
4547 if (pf->hw.func_caps.iscsi)
4548 enabled_tc = i40e_get_iscsi_tc_map(pf);
4549 else
Neerav Parikhfc51de92015-02-24 06:58:53 +00004550 return 1; /* Only TC0 */
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004551
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004552 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08004553 if (enabled_tc & BIT(i))
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004554 num_tc++;
4555 }
4556 return num_tc;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004557}
4558
4559/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004560 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
4561 * @pf: PF being queried
4562 *
4563 * Return a bitmap for enabled traffic classes for this PF.
4564 **/
4565static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4566{
4567 /* If DCB is not enabled for this PF then just return default TC */
4568 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
David Ertmanea6acb72016-09-20 07:10:50 -07004569 return I40E_DEFAULT_TRAFFIC_CLASS;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004570
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004571 /* SFP mode we want PF to be enabled for all TCs */
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004572 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4573 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4574
Neerav Parikhfc51de92015-02-24 06:58:53 +00004575 /* MFP enabled and iSCSI PF type */
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00004576 if (pf->hw.func_caps.iscsi)
4577 return i40e_get_iscsi_tc_map(pf);
4578 else
David Ertmanea6acb72016-09-20 07:10:50 -07004579 return I40E_DEFAULT_TRAFFIC_CLASS;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004580}
4581
4582/**
4583 * i40e_vsi_get_bw_info - Query VSI BW Information
4584 * @vsi: the VSI being queried
4585 *
4586 * Returns 0 on success, negative value on failure
4587 **/
4588static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4589{
4590 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4591 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4592 struct i40e_pf *pf = vsi->back;
4593 struct i40e_hw *hw = &pf->hw;
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04004594 i40e_status ret;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004595 u32 tc_bw_max;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004596 int i;
4597
4598 /* Get the VSI level BW configuration */
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04004599 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4600 if (ret) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004601 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04004602 "couldn't get PF vsi bw config, err %s aq_err %s\n",
4603 i40e_stat_str(&pf->hw, ret),
4604 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Jesse Brandeburgdcae29b2013-09-13 08:23:20 +00004605 return -EINVAL;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004606 }
4607
4608 /* Get the VSI level BW configuration per TC */
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04004609 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4610 NULL);
4611 if (ret) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004612 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04004613 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
4614 i40e_stat_str(&pf->hw, ret),
4615 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Jesse Brandeburgdcae29b2013-09-13 08:23:20 +00004616 return -EINVAL;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004617 }
4618
4619 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4620 dev_info(&pf->pdev->dev,
4621 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4622 bw_config.tc_valid_bits,
4623 bw_ets_config.tc_valid_bits);
4624 /* Still continuing */
4625 }
4626
4627 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4628 vsi->bw_max_quanta = bw_config.max_bw;
4629 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4630 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4631 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4632 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4633 vsi->bw_ets_limit_credits[i] =
4634 le16_to_cpu(bw_ets_config.credits[i]);
4635 /* 3 bits out of 4 for each TC */
4636 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4637 }
Jesse Brandeburg078b5872013-09-25 23:41:14 +00004638
Jesse Brandeburgdcae29b2013-09-13 08:23:20 +00004639 return 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004640}
4641
4642/**
4643 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4644 * @vsi: the VSI being configured
4645 * @enabled_tc: TC bitmap
4646 * @bw_credits: BW shared credits per TC
4647 *
4648 * Returns 0 on success, negative value on failure
4649 **/
Jesse Brandeburgdcae29b2013-09-13 08:23:20 +00004650static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004651 u8 *bw_share)
4652{
4653 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04004654 i40e_status ret;
Jesse Brandeburgdcae29b2013-09-13 08:23:20 +00004655 int i;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004656
4657 bw_data.tc_valid_bits = enabled_tc;
4658 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4659 bw_data.tc_bw_credits[i] = bw_share[i];
4660
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04004661 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4662 NULL);
4663 if (ret) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004664 dev_info(&vsi->back->pdev->dev,
Jesse Brandeburg69bfb112014-02-11 08:24:13 +00004665 "AQ command Config VSI BW allocation per TC failed = %d\n",
4666 vsi->back->hw.aq.asq_last_status);
Jesse Brandeburgdcae29b2013-09-13 08:23:20 +00004667 return -EINVAL;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004668 }
4669
4670 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4671 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4672
Jesse Brandeburgdcae29b2013-09-13 08:23:20 +00004673 return 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004674}
4675
4676/**
4677 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4678 * @vsi: the VSI being configured
4679 * @enabled_tc: TC map to be enabled
4680 *
4681 **/
4682static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4683{
4684 struct net_device *netdev = vsi->netdev;
4685 struct i40e_pf *pf = vsi->back;
4686 struct i40e_hw *hw = &pf->hw;
4687 u8 netdev_tc = 0;
4688 int i;
4689 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4690
4691 if (!netdev)
4692 return;
4693
4694 if (!enabled_tc) {
4695 netdev_reset_tc(netdev);
4696 return;
4697 }
4698
4699 /* Set up actual enabled TCs on the VSI */
4700 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4701 return;
4702
4703 /* set per TC queues for the VSI */
4704 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4705 /* Only set TC queues for enabled tcs
4706 *
4707 * e.g. For a VSI that has TC0 and TC3 enabled the
 4708 * enabled_tc bitmap would be 0x09 (binary 1001); the driver
4709 * will set the numtc for netdev as 2 that will be
4710 * referenced by the netdev layer as TC 0 and 1.
4711 */
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08004712 if (vsi->tc_config.enabled_tc & BIT(i))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004713 netdev_set_tc_queue(netdev,
4714 vsi->tc_config.tc_info[i].netdev_tc,
4715 vsi->tc_config.tc_info[i].qcount,
4716 vsi->tc_config.tc_info[i].qoffset);
4717 }
4718
4719 /* Assign UP2TC map for the VSI */
4720 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4721 /* Get the actual TC# for the UP */
4722 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4723 /* Get the mapped netdev TC# for the UP */
4724 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
4725 netdev_set_prio_tc_map(netdev, i, netdev_tc);
4726 }
4727}
4728
4729/**
4730 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
4731 * @vsi: the VSI being configured
4732 * @ctxt: the ctxt buffer returned from AQ VSI update param command
4733 **/
4734static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4735 struct i40e_vsi_context *ctxt)
4736{
4737 /* copy just the sections touched not the entire info
4738 * since not all sections are valid as returned by
4739 * update vsi params
4740 */
4741 vsi->info.mapping_flags = ctxt->info.mapping_flags;
4742 memcpy(&vsi->info.queue_mapping,
4743 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4744 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4745 sizeof(vsi->info.tc_mapping));
4746}
4747
4748/**
4749 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4750 * @vsi: VSI to be configured
4751 * @enabled_tc: TC bitmap
4752 *
4753 * This configures a particular VSI for TCs that are mapped to the
4754 * given TC bitmap. It uses default bandwidth share for TCs across
4755 * VSIs to configure TC for a particular VSI.
4756 *
4757 * NOTE:
 4758 * It is expected that the VSI queues have been quiesced before calling
4759 * this function.
4760 **/
4761static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4762{
4763 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4764 struct i40e_vsi_context ctxt;
4765 int ret = 0;
4766 int i;
4767
4768 /* Check if enabled_tc is same as existing or new TCs */
4769 if (vsi->tc_config.enabled_tc == enabled_tc)
4770 return ret;
4771
4772 /* Enable ETS TCs with equal BW Share for now across all VSIs */
4773 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08004774 if (enabled_tc & BIT(i))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004775 bw_share[i] = 1;
4776 }
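	/* Illustrative example: enabled_tc = 0x5 yields
	 * bw_share = {1, 0, 1, 0, ...}, i.e. TC0 and TC2 each get one
	 * relative credit (equal shares).
	 */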
4777
4778 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4779 if (ret) {
4780 dev_info(&vsi->back->pdev->dev,
4781 "Failed configuring TC map %d for VSI %d\n",
4782 enabled_tc, vsi->seid);
4783 goto out;
4784 }
4785
4786 /* Update Queue Pairs Mapping for currently enabled UPs */
4787 ctxt.seid = vsi->seid;
4788 ctxt.pf_num = vsi->back->hw.pf_id;
4789 ctxt.vf_num = 0;
4790 ctxt.uplink_seid = vsi->uplink_seid;
Jesse Brandeburg1a2f6242015-03-31 00:45:01 -07004791 ctxt.info = vsi->info;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004792 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4793
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06004794 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
4795 ctxt.info.valid_sections |=
4796 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
4797 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
4798 }
4799
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004800 /* Update the VSI after updating the VSI queue-mapping information */
4801 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4802 if (ret) {
4803 dev_info(&vsi->back->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04004804 "Update vsi tc config failed, err %s aq_err %s\n",
4805 i40e_stat_str(&vsi->back->hw, ret),
4806 i40e_aq_str(&vsi->back->hw,
4807 vsi->back->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004808 goto out;
4809 }
4810 /* update the local VSI info with updated queue map */
4811 i40e_vsi_update_queue_map(vsi, &ctxt);
4812 vsi->info.valid_sections = 0;
4813
4814 /* Update current VSI BW information */
4815 ret = i40e_vsi_get_bw_info(vsi);
4816 if (ret) {
4817 dev_info(&vsi->back->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04004818 "Failed updating vsi bw info, err %s aq_err %s\n",
4819 i40e_stat_str(&vsi->back->hw, ret),
4820 i40e_aq_str(&vsi->back->hw,
4821 vsi->back->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00004822 goto out;
4823 }
4824
4825 /* Update the netdev TC setup */
4826 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4827out:
4828 return ret;
4829}
4830
4831/**
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08004832 * i40e_veb_config_tc - Configure TCs for given VEB
4833 * @veb: given VEB
4834 * @enabled_tc: TC bitmap
4835 *
4836 * Configures given TC bitmap for VEB (switching) element
4837 **/
4838int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4839{
4840 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4841 struct i40e_pf *pf = veb->pf;
4842 int ret = 0;
4843 int i;
4844
4845 /* No TCs or already enabled TCs just return */
4846 if (!enabled_tc || veb->enabled_tc == enabled_tc)
4847 return ret;
4848
4849 bw_data.tc_valid_bits = enabled_tc;
4850 /* bw_data.absolute_credits is not set (relative) */
4851
4852 /* Enable ETS TCs with equal BW Share for now */
4853 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08004854 if (enabled_tc & BIT(i))
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08004855 bw_data.tc_bw_share_credits[i] = 1;
4856 }
4857
4858 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4859 &bw_data, NULL);
4860 if (ret) {
4861 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04004862 "VEB bw config failed, err %s aq_err %s\n",
4863 i40e_stat_str(&pf->hw, ret),
4864 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08004865 goto out;
4866 }
4867
4868 /* Update the BW information */
4869 ret = i40e_veb_get_bw_info(veb);
4870 if (ret) {
4871 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04004872 "Failed getting veb bw config, err %s aq_err %s\n",
4873 i40e_stat_str(&pf->hw, ret),
4874 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08004875 }
4876
4877out:
4878 return ret;
4879}
4880
4881#ifdef CONFIG_I40E_DCB
4882/**
4883 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4884 * @pf: PF struct
4885 *
4886 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 4887 * the caller has quiesced all the VSIs before calling
 4888 * this function.
4889 **/
4890static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4891{
4892 u8 tc_map = 0;
4893 int ret;
4894 u8 v;
4895
4896 /* Enable the TCs available on PF to all VEBs */
4897 tc_map = i40e_pf_get_tc_map(pf);
4898 for (v = 0; v < I40E_MAX_VEB; v++) {
4899 if (!pf->veb[v])
4900 continue;
4901 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
4902 if (ret) {
4903 dev_info(&pf->pdev->dev,
4904 "Failed configuring TC for VEB seid=%d\n",
4905 pf->veb[v]->seid);
 4906 /* Will try to configure as many components as possible */
4907 }
4908 }
4909
4910 /* Update each VSI */
Mitch Williams505682c2014-05-20 08:01:37 +00004911 for (v = 0; v < pf->num_alloc_vsi; v++) {
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08004912 if (!pf->vsi[v])
4913 continue;
4914
4915 /* - Enable all TCs for the LAN VSI
Vasu Dev38e00432014-08-01 13:27:03 -07004916#ifdef I40E_FCOE
4917 * - For FCoE VSI only enable the TC configured
4918 * as per the APP TLV
4919#endif
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08004920 * - For all others keep them at TC0 for now
4921 */
4922 if (v == pf->lan_vsi)
4923 tc_map = i40e_pf_get_tc_map(pf);
4924 else
David Ertmanea6acb72016-09-20 07:10:50 -07004925 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
Vasu Dev38e00432014-08-01 13:27:03 -07004926#ifdef I40E_FCOE
4927 if (pf->vsi[v]->type == I40E_VSI_FCOE)
4928 tc_map = i40e_get_fcoe_tc_map(pf);
4929#endif /* #ifdef I40E_FCOE */
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08004930
4931 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4932 if (ret) {
4933 dev_info(&pf->pdev->dev,
4934 "Failed configuring TC for VSI seid=%d\n",
4935 pf->vsi[v]->seid);
 4936 /* Will try to configure as many components as possible */
4937 } else {
Neerav Parikh0672a092014-04-01 07:11:47 +00004938 /* Re-configure VSI vectors based on updated TC map */
4939 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08004940 if (pf->vsi[v]->netdev)
4941 i40e_dcbnl_set_all(pf->vsi[v]);
4942 }
4943 }
4944}
4945
4946/**
Neerav Parikh2fd75f32014-11-12 00:18:20 +00004947 * i40e_resume_port_tx - Resume port Tx
4948 * @pf: PF struct
4949 *
4950 * Resume a port's Tx and issue a PF reset in case of failure to
4951 * resume.
4952 **/
4953static int i40e_resume_port_tx(struct i40e_pf *pf)
4954{
4955 struct i40e_hw *hw = &pf->hw;
4956 int ret;
4957
4958 ret = i40e_aq_resume_port_tx(hw, NULL);
4959 if (ret) {
4960 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04004961 "Resume Port Tx failed, err %s aq_err %s\n",
4962 i40e_stat_str(&pf->hw, ret),
4963 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Neerav Parikh2fd75f32014-11-12 00:18:20 +00004964 /* Schedule PF reset to recover */
4965 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4966 i40e_service_event_schedule(pf);
4967 }
4968
4969 return ret;
4970}
4971
4972/**
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08004973 * i40e_init_pf_dcb - Initialize DCB configuration
4974 * @pf: PF being configured
4975 *
4976 * Query the current DCB configuration and cache it
4977 * in the hardware structure
4978 **/
4979static int i40e_init_pf_dcb(struct i40e_pf *pf)
4980{
4981 struct i40e_hw *hw = &pf->hw;
4982 int err = 0;
4983
Anjali Singhai Jain025b4a52015-02-24 06:58:46 +00004984 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
Neerav Parikhf1bbad32016-01-13 16:51:39 -08004985 if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT)
Anjali Singhai Jain025b4a52015-02-24 06:58:46 +00004986 goto out;
4987
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08004988 /* Get the initial DCB configuration */
4989 err = i40e_init_dcb(hw);
4990 if (!err) {
4991 /* Device/Function is not DCBX capable */
4992 if ((!hw->func_caps.dcb) ||
4993 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
4994 dev_info(&pf->pdev->dev,
4995 "DCBX offload is not supported or is disabled for this PF.\n");
4996
4997 if (pf->flags & I40E_FLAG_MFP_ENABLED)
4998 goto out;
4999
5000 } else {
5001 /* When status is not DISABLED then DCBX in FW */
5002 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
5003 DCB_CAP_DCBX_VER_IEEE;
Neerav Parikh4d9b6042014-05-22 06:31:51 +00005004
5005 pf->flags |= I40E_FLAG_DCB_CAPABLE;
Dave Ertmana0362442016-08-29 17:38:26 -07005006 /* Enable DCB tagging only when more than one TC is in use;
 5007 * explicitly disable it when only one TC is configured
5008 */
Neerav Parikh4d9b6042014-05-22 06:31:51 +00005009 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5010 pf->flags |= I40E_FLAG_DCB_ENABLED;
Dave Ertmana0362442016-08-29 17:38:26 -07005011 else
5012 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
Neerav Parikh9fa61dd2014-11-12 00:18:25 +00005013 dev_dbg(&pf->pdev->dev,
5014 "DCBX offload is supported for this PF.\n");
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005015 }
Neerav Parikh014269f2014-04-01 07:11:48 +00005016 } else {
Shannon Nelsonaebfc812014-12-11 07:06:38 +00005017 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04005018 "Query for DCB configuration failed, err %s aq_err %s\n",
5019 i40e_stat_str(&pf->hw, err),
5020 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005021 }
5022
5023out:
5024 return err;
5025}
5026#endif /* CONFIG_I40E_DCB */
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005027#define SPEED_SIZE 14
5028#define FC_SIZE 8
5029/**
5030 * i40e_print_link_message - print link up or down
5031 * @vsi: the VSI for which link needs a message
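 * @isup: true if the link is up, false if it is down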
5032 */
Matt Jaredc156f852015-08-27 11:42:39 -04005033void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005034{
Shannon Nelsona9165492015-09-03 17:19:00 -04005035 char *speed = "Unknown";
5036 char *fc = "Unknown";
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005037
Matt Jaredc156f852015-08-27 11:42:39 -04005038 if (vsi->current_isup == isup)
5039 return;
5040 vsi->current_isup = isup;
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005041 if (!isup) {
5042 netdev_info(vsi->netdev, "NIC Link is Down\n");
5043 return;
5044 }
5045
Greg Rose148c2d82014-12-11 07:06:27 +00005046 /* Warn the user if the link speed on an NPAR-enabled partition is
 5047 * not at least 10Gbps
5048 */
5049 if (vsi->back->hw.func_caps.npar_enable &&
5050 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
5051 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
5052 netdev_warn(vsi->netdev,
5053 "The partition detected link speed that is less than 10Gbps\n");
5054
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005055 switch (vsi->back->hw.phy.link_info.link_speed) {
5056 case I40E_LINK_SPEED_40GB:
Shannon Nelsona9165492015-09-03 17:19:00 -04005057 speed = "40 G";
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005058 break;
Jesse Brandeburgae24b402015-03-27 00:12:09 -07005059 case I40E_LINK_SPEED_20GB:
Shannon Nelsona9165492015-09-03 17:19:00 -04005060 speed = "20 G";
Jesse Brandeburgae24b402015-03-27 00:12:09 -07005061 break;
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005062 case I40E_LINK_SPEED_10GB:
Shannon Nelsona9165492015-09-03 17:19:00 -04005063 speed = "10 G";
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005064 break;
5065 case I40E_LINK_SPEED_1GB:
Shannon Nelsona9165492015-09-03 17:19:00 -04005066 speed = "1000 M";
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005067 break;
Mitch Williams5960d332014-09-13 07:40:47 +00005068 case I40E_LINK_SPEED_100MB:
Shannon Nelsona9165492015-09-03 17:19:00 -04005069 speed = "100 M";
Mitch Williams5960d332014-09-13 07:40:47 +00005070 break;
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005071 default:
5072 break;
5073 }
5074
5075 switch (vsi->back->hw.fc.current_mode) {
5076 case I40E_FC_FULL:
Shannon Nelsona9165492015-09-03 17:19:00 -04005077 fc = "RX/TX";
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005078 break;
5079 case I40E_FC_TX_PAUSE:
Shannon Nelsona9165492015-09-03 17:19:00 -04005080 fc = "TX";
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005081 break;
5082 case I40E_FC_RX_PAUSE:
Shannon Nelsona9165492015-09-03 17:19:00 -04005083 fc = "RX";
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005084 break;
5085 default:
Shannon Nelsona9165492015-09-03 17:19:00 -04005086 fc = "None";
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005087 break;
5088 }
5089
Shannon Nelsona9165492015-09-03 17:19:00 -04005090 netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005091 speed, fc);
5092}
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005093
5094/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005095 * i40e_up_complete - Finish the last steps of bringing up a connection
5096 * @vsi: the VSI being configured
5097 **/
5098static int i40e_up_complete(struct i40e_vsi *vsi)
5099{
5100 struct i40e_pf *pf = vsi->back;
5101 int err;
5102
5103 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5104 i40e_vsi_configure_msix(vsi);
5105 else
5106 i40e_configure_msi_and_legacy(vsi);
5107
5108 /* start rings */
5109 err = i40e_vsi_control_rings(vsi, true);
5110 if (err)
5111 return err;
5112
5113 clear_bit(__I40E_DOWN, &vsi->state);
5114 i40e_napi_enable_all(vsi);
5115 i40e_vsi_enable_irq(vsi);
5116
5117 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
5118 (vsi->netdev)) {
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005119 i40e_print_link_message(vsi, true);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005120 netif_tx_start_all_queues(vsi->netdev);
5121 netif_carrier_on(vsi->netdev);
Anjali Singhai6d779b42013-09-28 06:00:02 +00005122 } else if (vsi->netdev) {
Jesse Brandeburgcf05ed02014-04-23 04:50:12 +00005123 i40e_print_link_message(vsi, false);
Carolyn Wyborny7b592f62014-07-10 07:58:19 +00005124 /* need to check for qualified module here*/
5125 if ((pf->hw.phy.link_info.link_info &
5126 I40E_AQ_MEDIA_AVAILABLE) &&
5127 (!(pf->hw.phy.link_info.an_info &
5128 I40E_AQ_QUALIFIED_MODULE)))
5129 netdev_err(vsi->netdev,
5130 "the driver failed to link because an unqualified module was detected.");
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005131 }
Anjali Singhai Jainca64fa42014-02-11 08:26:30 +00005132
5133 /* replay FDIR SB filters */
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005134 if (vsi->type == I40E_VSI_FDIR) {
5135 /* reset fd counters */
5136 pf->fd_add_err = pf->fd_atr_cnt = 0;
5137 if (pf->fd_tcp_rule > 0) {
Jacob Keller234dc4e2016-09-06 18:05:09 -07005138 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
Anjali Singhai Jain2e4875e2015-04-16 20:06:06 -04005139 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5140 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005141 pf->fd_tcp_rule = 0;
5142 }
Anjali Singhai Jainca64fa42014-02-11 08:26:30 +00005143 i40e_fdir_filter_restore(vsi);
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005144 }
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06005145
5146 /* On the next run of the service_task, notify any clients of the new
5147 * opened netdev
5148 */
5149 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005150 i40e_service_event_schedule(pf);
5151
5152 return 0;
5153}
5154
5155/**
5156 * i40e_vsi_reinit_locked - Reset the VSI
5157 * @vsi: the VSI being configured
5158 *
5159 * Rebuild the ring structs after some configuration
5160 * has changed, e.g. MTU size.
5161 **/
5162static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
5163{
5164 struct i40e_pf *pf = vsi->back;
5165
5166 WARN_ON(in_interrupt());
5167 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
5168 usleep_range(1000, 2000);
5169 i40e_down(vsi);
5170
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005171 i40e_up(vsi);
5172 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
5173}
5174
5175/**
5176 * i40e_up - Bring the connection back up after being down
5177 * @vsi: the VSI being configured
5178 **/
5179int i40e_up(struct i40e_vsi *vsi)
5180{
5181 int err;
5182
5183 err = i40e_vsi_configure(vsi);
5184 if (!err)
5185 err = i40e_up_complete(vsi);
5186
5187 return err;
5188}
5189
5190/**
5191 * i40e_down - Shutdown the connection processing
5192 * @vsi: the VSI being stopped
5193 **/
5194void i40e_down(struct i40e_vsi *vsi)
5195{
5196 int i;
5197
5198 /* It is assumed that the caller of this function
5199 * sets the vsi->state __I40E_DOWN bit.
5200 */
5201 if (vsi->netdev) {
5202 netif_carrier_off(vsi->netdev);
5203 netif_tx_disable(vsi->netdev);
5204 }
5205 i40e_vsi_disable_irq(vsi);
5206 i40e_vsi_control_rings(vsi, false);
5207 i40e_napi_disable_all(vsi);
5208
5209 for (i = 0; i < vsi->num_queue_pairs; i++) {
Alexander Duyck9f65e152013-09-28 06:00:58 +00005210 i40e_clean_tx_ring(vsi->tx_rings[i]);
5211 i40e_clean_rx_ring(vsi->rx_rings[i]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005212 }
Catherine Sullivanf980d442016-05-16 10:26:34 -07005213
5214 i40e_notify_client_of_netdev_close(vsi, false);
5215
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005216}
5217
5218/**
5219 * i40e_setup_tc - configure multiple traffic classes
5220 * @netdev: net device to configure
5221 * @tc: number of traffic classes to enable
5222 **/
5223static int i40e_setup_tc(struct net_device *netdev, u8 tc)
5224{
5225 struct i40e_netdev_priv *np = netdev_priv(netdev);
5226 struct i40e_vsi *vsi = np->vsi;
5227 struct i40e_pf *pf = vsi->back;
5228 u8 enabled_tc = 0;
5229 int ret = -EINVAL;
5230 int i;
5231
5232 /* Check if DCB enabled to continue */
5233 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5234 netdev_info(netdev, "DCB is not enabled for adapter\n");
5235 goto exit;
5236 }
5237
5238 /* Check if MFP enabled */
5239 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5240 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
5241 goto exit;
5242 }
5243
5244 /* Check whether tc count is within enabled limit */
5245 if (tc > i40e_pf_get_num_tc(pf)) {
5246 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
5247 goto exit;
5248 }
5249
5250 /* Generate TC map for number of tc requested */
5251 for (i = 0; i < tc; i++)
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08005252 enabled_tc |= BIT(i);
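	/* e.g. a request of tc = 3 yields enabled_tc = 0x7 (TC0..TC2) */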
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005253
5254 /* Requesting same TC configuration as already enabled */
5255 if (enabled_tc == vsi->tc_config.enabled_tc)
5256 return 0;
5257
5258 /* Quiesce VSI queues */
5259 i40e_quiesce_vsi(vsi);
5260
5261 /* Configure VSI for enabled TCs */
5262 ret = i40e_vsi_config_tc(vsi, enabled_tc);
5263 if (ret) {
5264 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
5265 vsi->seid);
5266 goto exit;
5267 }
5268
5269 /* Unquiesce VSI */
5270 i40e_unquiesce_vsi(vsi);
5271
5272exit:
5273 return ret;
5274}
5275
John Fastabende4c67342016-02-16 21:16:15 -08005276#ifdef I40E_FCOE
John Fastabend16e5cc62016-02-16 21:16:43 -08005277int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
5278 struct tc_to_netdev *tc)
John Fastabende4c67342016-02-16 21:16:15 -08005279#else
John Fastabend16e5cc62016-02-16 21:16:43 -08005280static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
5281 struct tc_to_netdev *tc)
John Fastabende4c67342016-02-16 21:16:15 -08005282#endif
5283{
John Fastabend16e5cc62016-02-16 21:16:43 -08005284 if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
John Fastabende4c67342016-02-16 21:16:15 -08005285 return -EINVAL;
John Fastabend16e5cc62016-02-16 21:16:43 -08005286 return i40e_setup_tc(netdev, tc->tc);
John Fastabende4c67342016-02-16 21:16:15 -08005287}
5288
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005289/**
5290 * i40e_open - Called when a network interface is made active
5291 * @netdev: network interface device structure
5292 *
5293 * The open entry point is called when a network interface is made
5294 * active by the system (IFF_UP). At this point all resources needed
5295 * for transmit and receive operations are allocated, the interrupt
5296 * handler is registered with the OS, the netdev watchdog subtask is
5297 * enabled, and the stack is notified that the interface is ready.
5298 *
5299 * Returns 0 on success, negative value on failure
5300 **/
Vasu Dev38e00432014-08-01 13:27:03 -07005301int i40e_open(struct net_device *netdev)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005302{
5303 struct i40e_netdev_priv *np = netdev_priv(netdev);
5304 struct i40e_vsi *vsi = np->vsi;
5305 struct i40e_pf *pf = vsi->back;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005306 int err;
5307
Shannon Nelson4eb3f762014-03-06 08:59:58 +00005308 /* disallow open during test or if eeprom is broken */
5309 if (test_bit(__I40E_TESTING, &pf->state) ||
5310 test_bit(__I40E_BAD_EEPROM, &pf->state))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005311 return -EBUSY;
5312
5313 netif_carrier_off(netdev);
5314
Elizabeth Kappler6c167f52014-02-15 07:41:38 +00005315 err = i40e_vsi_open(vsi);
5316 if (err)
5317 return err;
5318
Jesse Brandeburg059dab62014-04-01 09:07:20 +00005319 /* configure global TSO hardware offload settings */
5320 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
5321 TCP_FLAG_FIN) >> 16);
5322 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
5323 TCP_FLAG_FIN |
5324 TCP_FLAG_CWR) >> 16);
5325 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
5326
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07005327 udp_tunnel_get_rx_info(netdev);
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06005328
Elizabeth Kappler6c167f52014-02-15 07:41:38 +00005329 return 0;
5330}
5331
5332/**
5333 * i40e_vsi_open -
5334 * @vsi: the VSI to open
5335 *
5336 * Finish initialization of the VSI.
5337 *
5338 * Returns 0 on success, negative value on failure
5339 **/
5340int i40e_vsi_open(struct i40e_vsi *vsi)
5341{
5342 struct i40e_pf *pf = vsi->back;
Carolyn Wybornyb294ac72014-12-11 07:06:39 +00005343 char int_name[I40E_INT_NAME_STR_LEN];
Elizabeth Kappler6c167f52014-02-15 07:41:38 +00005344 int err;
5345
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005346 /* allocate descriptors */
5347 err = i40e_vsi_setup_tx_resources(vsi);
5348 if (err)
5349 goto err_setup_tx;
5350 err = i40e_vsi_setup_rx_resources(vsi);
5351 if (err)
5352 goto err_setup_rx;
5353
5354 err = i40e_vsi_configure(vsi);
5355 if (err)
5356 goto err_setup_rx;
5357
Shannon Nelsonc22e3c62014-03-14 07:32:25 +00005358 if (vsi->netdev) {
5359 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5360 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
5361 err = i40e_vsi_request_irq(vsi, int_name);
5362 if (err)
5363 goto err_setup_rx;
5364
5365 /* Notify the stack of the actual queue counts. */
5366 err = netif_set_real_num_tx_queues(vsi->netdev,
5367 vsi->num_queue_pairs);
5368 if (err)
5369 goto err_set_queues;
5370
5371 err = netif_set_real_num_rx_queues(vsi->netdev,
5372 vsi->num_queue_pairs);
5373 if (err)
5374 goto err_set_queues;
Shannon Nelson8a9eb7d2014-03-14 07:32:28 +00005375
5376 } else if (vsi->type == I40E_VSI_FDIR) {
Carolyn Wybornye240f672014-12-11 07:06:37 +00005377 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
Carolyn Wybornyb2008cb2014-11-11 20:05:26 +00005378 dev_driver_string(&pf->pdev->dev),
5379 dev_name(&pf->pdev->dev));
Shannon Nelson8a9eb7d2014-03-14 07:32:28 +00005380 err = i40e_vsi_request_irq(vsi, int_name);
Carolyn Wybornyb2008cb2014-11-11 20:05:26 +00005381
Shannon Nelsonc22e3c62014-03-14 07:32:25 +00005382 } else {
Jean Sacrence9ccb12014-05-01 14:31:18 +00005383 err = -EINVAL;
Elizabeth Kappler6c167f52014-02-15 07:41:38 +00005384 goto err_setup_rx;
5385 }
Anjali Singhai Jain25946dd2013-11-26 10:49:14 +00005386
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005387 err = i40e_up_complete(vsi);
5388 if (err)
5389 goto err_up_complete;
5390
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005391 return 0;
5392
5393err_up_complete:
5394 i40e_down(vsi);
Anjali Singhai Jain25946dd2013-11-26 10:49:14 +00005395err_set_queues:
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005396 i40e_vsi_free_irq(vsi);
5397err_setup_rx:
5398 i40e_vsi_free_rx_resources(vsi);
5399err_setup_tx:
5400 i40e_vsi_free_tx_resources(vsi);
5401 if (vsi == pf->vsi[pf->lan_vsi])
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04005402 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005403
5404 return err;
5405}
5406
5407/**
Joseph Gasparakis17a73f62014-02-12 01:45:30 +00005408 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
Jeff Kirsherb40c82e62015-02-27 09:18:34 +00005409 * @pf: Pointer to PF
Joseph Gasparakis17a73f62014-02-12 01:45:30 +00005410 *
5411 * This function destroys the hlist where all the Flow Director
5412 * filters were saved.
5413 **/
5414static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5415{
5416 struct i40e_fdir_filter *filter;
5417 struct hlist_node *node2;
5418
5419 hlist_for_each_entry_safe(filter, node2,
5420 &pf->fdir_filter_list, fdir_node) {
5421 hlist_del(&filter->fdir_node);
5422 kfree(filter);
5423 }
5424 pf->fdir_pf_active_filters = 0;
5425}
5426
5427/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005428 * i40e_close - Disables a network interface
5429 * @netdev: network interface device structure
5430 *
5431 * The close entry point is called when an interface is de-activated
5432 * by the OS. The hardware is still under the driver's control, but
5433 * this netdev interface is disabled.
5434 *
5435 * Returns 0, this is not allowed to fail
5436 **/
Vasu Dev38e00432014-08-01 13:27:03 -07005437int i40e_close(struct net_device *netdev)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005438{
5439 struct i40e_netdev_priv *np = netdev_priv(netdev);
5440 struct i40e_vsi *vsi = np->vsi;
5441
Shannon Nelson90ef8d42014-03-14 07:32:26 +00005442 i40e_vsi_close(vsi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005443
5444 return 0;
5445}
5446
5447/**
5448 * i40e_do_reset - Start a PF or Core Reset sequence
5449 * @pf: board private structure
5450 * @reset_flags: which reset is requested
5451 *
5452 * The essential difference in resets is that the PF Reset
5453 * doesn't clear the packet buffers, doesn't reset the PE
5454 * firmware, and doesn't bother the other PFs on the chip.
5455 **/
5456void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
5457{
5458 u32 val;
5459
5460 WARN_ON(in_interrupt());
5461
Mitch Williams263fc482014-04-23 04:50:11 +00005462
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005463 /* do the biggest reset indicated */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04005464 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005465
5466 /* Request a Global Reset
5467 *
5468 * This will start the chip's countdown to the actual full
5469 * chip reset event, and a warning interrupt to be sent
5470 * to all PFs, including the requestor. Our handler
5471 * for the warning interrupt will deal with the shutdown
5472 * and recovery of the switch setup.
5473 */
Jesse Brandeburg69bfb112014-02-11 08:24:13 +00005474 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005475 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5476 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
5477 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5478
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04005479 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005480
5481 /* Request a Core Reset
5482 *
5483 * Same as Global Reset, except does *not* include the MAC/PHY
5484 */
Jesse Brandeburg69bfb112014-02-11 08:24:13 +00005485 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005486 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5487 val |= I40E_GLGEN_RTRIG_CORER_MASK;
5488 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5489 i40e_flush(&pf->hw);
5490
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04005491 } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005492
5493 /* Request a PF Reset
5494 *
5495 * Resets only the PF-specific registers
5496 *
5497 * This goes directly to the tear-down and rebuild of
 5498 * the switch, since we need to do the same recovery as
 5499 * for the Core Reset.
5500 */
Jesse Brandeburg69bfb112014-02-11 08:24:13 +00005501 dev_dbg(&pf->pdev->dev, "PFR requested\n");
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005502 i40e_handle_reset_warning(pf);
5503
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04005504 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005505 int v;
5506
5507 /* Find the VSI(s) that requested a re-init */
5508 dev_info(&pf->pdev->dev,
5509 "VSI reinit requested\n");
Mitch Williams505682c2014-05-20 08:01:37 +00005510 for (v = 0; v < pf->num_alloc_vsi; v++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005511 struct i40e_vsi *vsi = pf->vsi[v];
Jesse Brandeburg6995b362015-08-28 17:55:54 -04005512
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005513 if (vsi != NULL &&
5514 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
5515 i40e_vsi_reinit_locked(pf->vsi[v]);
5516 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
5517 }
5518 }
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04005519 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
Neerav Parikhb5d06f02014-06-03 23:50:17 +00005520 int v;
5521
5522 /* Find the VSI(s) that needs to be brought down */
5523 dev_info(&pf->pdev->dev, "VSI down requested\n");
5524 for (v = 0; v < pf->num_alloc_vsi; v++) {
5525 struct i40e_vsi *vsi = pf->vsi[v];
Jesse Brandeburg6995b362015-08-28 17:55:54 -04005526
Neerav Parikhb5d06f02014-06-03 23:50:17 +00005527 if (vsi != NULL &&
5528 test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
5529 set_bit(__I40E_DOWN, &vsi->state);
5530 i40e_down(vsi);
5531 clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
5532 }
5533 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005534 } else {
5535 dev_info(&pf->pdev->dev,
5536 "bad reset request 0x%08x\n", reset_flags);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005537 }
5538}
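
/* Usage sketch (illustrative, not part of the driver): process-context
 * callers typically go through the rtnl-protected wrapper
 * i40e_do_reset_safe() defined further below, e.g.
 *
 *	i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 *
 * while asynchronous requesters set a state bit and let the service task's
 * i40e_reset_subtask() translate it into reset_flags:
 *
 *	set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
 *	i40e_service_event_schedule(pf);
 */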
5539
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005540#ifdef CONFIG_I40E_DCB
5541/**
5542 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5543 * @pf: board private structure
5544 * @old_cfg: current DCB config
5545 * @new_cfg: new DCB config
5546 **/
5547bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5548 struct i40e_dcbx_config *old_cfg,
5549 struct i40e_dcbx_config *new_cfg)
5550{
5551 bool need_reconfig = false;
5552
5553 /* Check if ETS configuration has changed */
5554 if (memcmp(&new_cfg->etscfg,
5555 &old_cfg->etscfg,
5556 sizeof(new_cfg->etscfg))) {
5557 /* If Priority Table has changed reconfig is needed */
5558 if (memcmp(&new_cfg->etscfg.prioritytable,
5559 &old_cfg->etscfg.prioritytable,
5560 sizeof(new_cfg->etscfg.prioritytable))) {
5561 need_reconfig = true;
Jesse Brandeburg69bfb112014-02-11 08:24:13 +00005562 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005563 }
5564
5565 if (memcmp(&new_cfg->etscfg.tcbwtable,
5566 &old_cfg->etscfg.tcbwtable,
5567 sizeof(new_cfg->etscfg.tcbwtable)))
Jesse Brandeburg69bfb112014-02-11 08:24:13 +00005568 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005569
5570 if (memcmp(&new_cfg->etscfg.tsatable,
5571 &old_cfg->etscfg.tsatable,
5572 sizeof(new_cfg->etscfg.tsatable)))
Jesse Brandeburg69bfb112014-02-11 08:24:13 +00005573 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005574 }
5575
5576 /* Check if PFC configuration has changed */
5577 if (memcmp(&new_cfg->pfc,
5578 &old_cfg->pfc,
5579 sizeof(new_cfg->pfc))) {
5580 need_reconfig = true;
Jesse Brandeburg69bfb112014-02-11 08:24:13 +00005581 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005582 }
5583
5584 /* Check if APP Table has changed */
5585 if (memcmp(&new_cfg->app,
5586 &old_cfg->app,
Dave Jones3d9667a2014-01-27 23:11:09 -05005587 sizeof(new_cfg->app))) {
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005588 need_reconfig = true;
Jesse Brandeburg69bfb112014-02-11 08:24:13 +00005589 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
Dave Jones3d9667a2014-01-27 23:11:09 -05005590 }
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005591
Shannon Nelsonfb43201f2015-08-26 15:14:17 -04005592 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005593 return need_reconfig;
5594}
5595
5596/**
5597 * i40e_handle_lldp_event - Handle LLDP Change MIB event
5598 * @pf: board private structure
5599 * @e: event info posted on ARQ
5600 **/
5601static int i40e_handle_lldp_event(struct i40e_pf *pf,
5602 struct i40e_arq_event_info *e)
5603{
5604 struct i40e_aqc_lldp_get_mib *mib =
5605 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
5606 struct i40e_hw *hw = &pf->hw;
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005607 struct i40e_dcbx_config tmp_dcbx_cfg;
5608 bool need_reconfig = false;
5609 int ret = 0;
5610 u8 type;
5611
Neerav Parikh4d9b6042014-05-22 06:31:51 +00005612 /* Not DCB capable or capability disabled */
David Ertmanea6acb72016-09-20 07:10:50 -07005613 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
Neerav Parikh4d9b6042014-05-22 06:31:51 +00005614 return ret;
5615
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005616 /* Ignore if event is not for Nearest Bridge */
5617 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
5618 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
Shannon Nelsonfb43201f2015-08-26 15:14:17 -04005619 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005620 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
5621 return ret;
5622
5623 /* Check MIB Type and return if event for Remote MIB update */
5624 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
Neerav Parikh9fa61dd2014-11-12 00:18:25 +00005625 dev_dbg(&pf->pdev->dev,
Shannon Nelsonfb43201f2015-08-26 15:14:17 -04005626 "LLDP event mib type %s\n", type ? "remote" : "local");
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005627 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
5628 /* Update the remote cached instance and return */
5629 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
5630 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
5631 &hw->remote_dcbx_config);
5632 goto exit;
5633 }
5634
Neerav Parikh9fa61dd2014-11-12 00:18:25 +00005635 /* Store the old configuration */
Jesse Brandeburg1a2f6242015-03-31 00:45:01 -07005636 tmp_dcbx_cfg = hw->local_dcbx_config;
Neerav Parikh9fa61dd2014-11-12 00:18:25 +00005637
Neerav Parikh750fcbc2015-02-24 06:58:47 +00005638 /* Reset the old DCBx configuration data */
5639 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
Neerav Parikh9fa61dd2014-11-12 00:18:25 +00005640 /* Get updated DCBX data from firmware */
5641 ret = i40e_get_dcb_config(&pf->hw);
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005642 if (ret) {
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04005643 dev_info(&pf->pdev->dev,
5644 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
5645 i40e_stat_str(&pf->hw, ret),
5646 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005647 goto exit;
5648 }
5649
5650 /* No change detected in DCBX configs */
Neerav Parikh750fcbc2015-02-24 06:58:47 +00005651 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
5652 sizeof(tmp_dcbx_cfg))) {
Jesse Brandeburg69bfb112014-02-11 08:24:13 +00005653 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005654 goto exit;
5655 }
5656
Neerav Parikh750fcbc2015-02-24 06:58:47 +00005657 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
5658 &hw->local_dcbx_config);
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005659
Neerav Parikh750fcbc2015-02-24 06:58:47 +00005660 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005661
5662 if (!need_reconfig)
5663 goto exit;
5664
Neerav Parikh4d9b6042014-05-22 06:31:51 +00005665 /* Enable DCB tagging only when more than one TC */
Neerav Parikh750fcbc2015-02-24 06:58:47 +00005666 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
Neerav Parikh4d9b6042014-05-22 06:31:51 +00005667 pf->flags |= I40E_FLAG_DCB_ENABLED;
5668 else
5669 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5670
Neerav Parikh69129dc2014-11-12 00:18:46 +00005671 set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005672 /* Reconfiguration needed quiesce all VSIs */
5673 i40e_pf_quiesce_all_vsi(pf);
5674
5675 /* Changes in configuration update VEB/VSI */
5676 i40e_dcb_reconfigure(pf);
5677
Neerav Parikh2fd75f32014-11-12 00:18:20 +00005678 ret = i40e_resume_port_tx(pf);
5679
Neerav Parikh69129dc2014-11-12 00:18:46 +00005680 clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
Neerav Parikh2fd75f32014-11-12 00:18:20 +00005681 /* In case of error no point in resuming VSIs */
Neerav Parikh69129dc2014-11-12 00:18:46 +00005682 if (ret)
5683 goto exit;
5684
Neerav Parikh3fe06f42016-02-17 16:12:15 -08005685 /* Wait for the PF's queues to be disabled */
5686 ret = i40e_pf_wait_queues_disabled(pf);
Parikh, Neerav11e47702015-02-21 06:43:55 +00005687 if (ret) {
5688 /* Schedule PF reset to recover */
5689 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5690 i40e_service_event_schedule(pf);
5691 } else {
Neerav Parikh2fd75f32014-11-12 00:18:20 +00005692 i40e_pf_unquiesce_all_vsi(pf);
Neerav Parikh85a1aab2016-06-07 09:14:55 -07005693 /* Notify the client for the DCB changes */
5694 i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
Parikh, Neerav11e47702015-02-21 06:43:55 +00005695 }
5696
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08005697exit:
5698 return ret;
5699}
5700#endif /* CONFIG_I40E_DCB */
5701
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005702/**
Anjali Singhai Jain233261862013-11-26 10:49:22 +00005703 * i40e_do_reset_safe - Protected reset path for userland calls.
5704 * @pf: board private structure
5705 * @reset_flags: which reset is requested
5706 *
5707 **/
5708void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
5709{
5710 rtnl_lock();
5711 i40e_do_reset(pf, reset_flags);
5712 rtnl_unlock();
5713}
5714
5715/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005716 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5717 * @pf: board private structure
5718 * @e: event info posted on ARQ
5719 *
5720 * Handler for LAN Queue Overflow Event generated by the firmware for PF
5721 * and VF queues
5722 **/
5723static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5724 struct i40e_arq_event_info *e)
5725{
5726 struct i40e_aqc_lan_overflow *data =
5727 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5728 u32 queue = le32_to_cpu(data->prtdcb_rupto);
5729 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5730 struct i40e_hw *hw = &pf->hw;
5731 struct i40e_vf *vf;
5732 u16 vf_id;
5733
Jesse Brandeburg69bfb112014-02-11 08:24:13 +00005734 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5735 queue, qtx_ctl);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005736
5737 /* Queue belongs to VF, find the VF and issue VF reset */
5738 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5739 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5740 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5741 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
5742 vf_id -= hw->func_caps.vf_base_id;
5743 vf = &pf->vf[vf_id];
5744 i40e_vc_notify_vf_reset(vf);
5745 /* Allow VF to process pending reset notification */
5746 msleep(20);
5747 i40e_reset_vf(vf, false);
5748 }
5749}
5750
5751/**
5752 * i40e_service_event_complete - Finish up the service event
5753 * @pf: board private structure
5754 **/
5755static void i40e_service_event_complete(struct i40e_pf *pf)
5756{
Shannon Nelsonb875f992015-10-21 19:47:03 -04005757 WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005758
 5759	/* flush memory to make sure state is correct before next watchdog */
Peter Zijlstra4e857c52014-03-17 18:06:10 +01005760 smp_mb__before_atomic();
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005761 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
5762}
5763
5764/**
Anjali Singhai Jain12957382014-06-04 04:22:47 +00005765 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5766 * @pf: board private structure
5767 **/
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005768u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
Anjali Singhai Jain12957382014-06-04 04:22:47 +00005769{
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005770 u32 val, fcnt_prog;
Anjali Singhai Jain12957382014-06-04 04:22:47 +00005771
5772 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5773 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5774 return fcnt_prog;
5775}
5776
5777/**
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005778 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005779 * @pf: board private structure
5780 **/
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005781u32 i40e_get_current_fd_count(struct i40e_pf *pf)
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005782{
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005783 u32 val, fcnt_prog;
5784
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005785 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5786 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5787 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5788 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5789 return fcnt_prog;
5790}
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005791
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005792/**
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005793 * i40e_get_global_fd_count - Get total FD filters programmed on device
5794 * @pf: board private structure
5795 **/
5796u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5797{
5798 u32 val, fcnt_prog;
5799
5800 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5801 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5802 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5803 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
5804 return fcnt_prog;
5805}
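
/* Note (added for clarity): the three counters above differ in scope.
 * i40e_get_cur_guaranteed_fd_count() reports only this PF's guaranteed
 * filters, i40e_get_current_fd_count() adds this PF's best-effort filters,
 * and i40e_get_global_fd_count() reads the device-wide totals from
 * I40E_GLQF_FDCNT_0.  i40e_get_current_atr_cnt() further below derives the
 * ATR count by subtracting the sideband filters tracked in
 * pf->fdir_pf_active_filters.
 */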
5806
5807/**
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005808 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
5809 * @pf: board private structure
5810 **/
5811void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
5812{
Carolyn Wyborny3487b6c2015-08-27 11:42:38 -04005813 struct i40e_fdir_filter *filter;
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005814 u32 fcnt_prog, fcnt_avail;
Carolyn Wyborny3487b6c2015-08-27 11:42:38 -04005815 struct hlist_node *node;
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005816
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005817 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5818 return;
5819
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005820	/* Check if FD SB or ATR was auto-disabled and if there is enough room
5821 * to re-enable
5822 */
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005823 fcnt_prog = i40e_get_global_fd_count(pf);
Anjali Singhai Jain12957382014-06-04 04:22:47 +00005824 fcnt_avail = pf->fdir_pf_filter_count;
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005825 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
5826 (pf->fd_add_err == 0) ||
5827 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005828 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
5829 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
5830 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
Anjali Singhai Jain2e4875e2015-04-16 20:06:06 -04005831 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5832 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005833 }
5834 }
Jacob Kellera3417d22016-09-06 18:05:10 -07005835
5836 /* Wait for some more space to be available to turn on ATR. We also
5837 * must check that no existing ntuple rules for TCP are in effect
5838 */
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005839 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
5840 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
Jacob Kellera3417d22016-09-06 18:05:10 -07005841 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
5842 (pf->fd_tcp_rule == 0)) {
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005843 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
Anjali Singhai Jain2e4875e2015-04-16 20:06:06 -04005844 if (I40E_DEBUG_FD & pf->hw.debug_mask)
Jacob Kellera3417d22016-09-06 18:05:10 -07005845 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005846 }
5847 }
Carolyn Wyborny3487b6c2015-08-27 11:42:38 -04005848
5849 /* if hw had a problem adding a filter, delete it */
5850 if (pf->fd_inv > 0) {
5851 hlist_for_each_entry_safe(filter, node,
5852 &pf->fdir_filter_list, fdir_node) {
5853 if (filter->fd_id == pf->fd_inv) {
5854 hlist_del(&filter->fdir_node);
5855 kfree(filter);
5856 pf->fdir_pf_active_filters--;
5857 }
5858 }
5859 }
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005860}
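
/* Worked example (hypothetical numbers; the head-room value is assumed, not
 * taken from this file): with fdir_pf_filter_count = 512 and
 * I40E_FDIR_BUFFER_HEAD_ROOM = 32, sideband re-enable is considered once
 * fewer than 480 filters are programmed (one of the OR'd conditions above),
 * and ATR re-enable once fewer than 448 are programmed and no TCP sideband
 * rules (pf->fd_tcp_rule) remain.
 */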
5861
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005862#define I40E_MIN_FD_FLUSH_INTERVAL 10
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005863#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005864/**
5865 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
5866 * @pf: board private structure
5867 **/
5868static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
5869{
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005870 unsigned long min_flush_time;
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005871 int flush_wait_retry = 50;
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005872 bool disable_atr = false;
5873 int fd_room;
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005874 int reg;
5875
Jesse Brandeburga5fdaf32015-08-28 17:55:56 -04005876 if (!time_after(jiffies, pf->fd_flush_timestamp +
5877 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
5878 return;
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005879
Jesse Brandeburga5fdaf32015-08-28 17:55:56 -04005880	/* If the flush is happening too quickly and we have mostly SB rules, we
5881 * should not re-enable ATR for some time.
5882 */
5883 min_flush_time = pf->fd_flush_timestamp +
5884 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
5885 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005886
Jesse Brandeburga5fdaf32015-08-28 17:55:56 -04005887 if (!(time_after(jiffies, min_flush_time)) &&
5888 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
5889 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5890 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
5891 disable_atr = true;
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005892 }
Jesse Brandeburga5fdaf32015-08-28 17:55:56 -04005893
5894 pf->fd_flush_timestamp = jiffies;
Jacob Keller234dc4e2016-09-06 18:05:09 -07005895 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
Jesse Brandeburga5fdaf32015-08-28 17:55:56 -04005896 /* flush all filters */
5897 wr32(&pf->hw, I40E_PFQF_CTL_1,
5898 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
5899 i40e_flush(&pf->hw);
5900 pf->fd_flush_cnt++;
5901 pf->fd_add_err = 0;
5902 do {
5903 /* Check FD flush status every 5-6msec */
5904 usleep_range(5000, 6000);
5905 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
5906 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
5907 break;
5908 } while (flush_wait_retry--);
5909 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
5910 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
5911 } else {
5912 /* replay sideband filters */
5913 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
5914 if (!disable_atr)
Jacob Keller234dc4e2016-09-06 18:05:09 -07005915 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
Jesse Brandeburga5fdaf32015-08-28 17:55:56 -04005916 clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
5917 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5918 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
5919 }
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005920}
5921
5922/**
 5923 * i40e_get_current_atr_cnt - Get the total count of FD ATR filters programmed
5924 * @pf: board private structure
5925 **/
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005926u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005927{
5928 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
5929}
5930
 5931/* We can see up to 256 filter programming descriptors in flight when filters
 5932 * are being added very quickly, before the first filter miss error shows up
 5933 * on Rx queue 0. Accumulating enough error messages before reacting makes
 5934 * sure we don't trigger a flush too often.
 5935 */
5936#define I40E_MAX_FD_PROGRAM_ERROR 256
5937
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005938/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005939 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
5940 * @pf: board private structure
5941 **/
5942static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
5943{
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005944
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005945 /* if interface is down do nothing */
5946 if (test_bit(__I40E_DOWN, &pf->state))
5947 return;
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005948
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00005949 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00005950 i40e_fdir_flush_and_replay(pf);
5951
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00005952 i40e_fdir_check_and_reenable(pf);
5953
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005954}
5955
5956/**
5957 * i40e_vsi_link_event - notify VSI of a link event
5958 * @vsi: vsi to be notified
5959 * @link_up: link up or down
5960 **/
5961static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
5962{
Jesse Brandeburg32b5b812014-08-12 06:33:14 +00005963 if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005964 return;
5965
5966 switch (vsi->type) {
5967 case I40E_VSI_MAIN:
Vasu Dev38e00432014-08-01 13:27:03 -07005968#ifdef I40E_FCOE
5969 case I40E_VSI_FCOE:
5970#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005971 if (!vsi->netdev || !vsi->netdev_registered)
5972 break;
5973
5974 if (link_up) {
5975 netif_carrier_on(vsi->netdev);
5976 netif_tx_wake_all_queues(vsi->netdev);
5977 } else {
5978 netif_carrier_off(vsi->netdev);
5979 netif_tx_stop_all_queues(vsi->netdev);
5980 }
5981 break;
5982
5983 case I40E_VSI_SRIOV:
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005984 case I40E_VSI_VMDQ2:
5985 case I40E_VSI_CTRL:
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06005986 case I40E_VSI_IWARP:
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00005987 case I40E_VSI_MIRROR:
5988 default:
5989 /* there is no notification for other VSIs */
5990 break;
5991 }
5992}
5993
5994/**
5995 * i40e_veb_link_event - notify elements on the veb of a link event
5996 * @veb: veb to be notified
5997 * @link_up: link up or down
5998 **/
5999static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
6000{
6001 struct i40e_pf *pf;
6002 int i;
6003
6004 if (!veb || !veb->pf)
6005 return;
6006 pf = veb->pf;
6007
6008 /* depth first... */
6009 for (i = 0; i < I40E_MAX_VEB; i++)
6010 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
6011 i40e_veb_link_event(pf->veb[i], link_up);
6012
6013 /* ... now the local VSIs */
Mitch Williams505682c2014-05-20 08:01:37 +00006014 for (i = 0; i < pf->num_alloc_vsi; i++)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006015 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
6016 i40e_vsi_link_event(pf->vsi[i], link_up);
6017}
6018
6019/**
6020 * i40e_link_event - Update netif_carrier status
6021 * @pf: board private structure
6022 **/
6023static void i40e_link_event(struct i40e_pf *pf)
6024{
Mitch Williams320684c2014-10-17 03:14:43 +00006025 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
Catherine Sullivanfef59dd2014-12-11 07:06:33 +00006026 u8 new_link_speed, old_link_speed;
Jesse Brandeburga72a5abc2015-08-26 15:14:19 -04006027 i40e_status status;
6028 bool new_link, old_link;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006029
Catherine Sullivan1f9610e2015-10-21 19:47:09 -04006030 /* save off old link status information */
6031 pf->hw.phy.link_info_old = pf->hw.phy.link_info;
6032
Jesse Brandeburg1e701e02014-09-13 07:40:42 +00006033 /* set this to force the get_link_status call to refresh state */
6034 pf->hw.phy.get_link_info = true;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006035
Jesse Brandeburg1e701e02014-09-13 07:40:42 +00006036 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
Jesse Brandeburga72a5abc2015-08-26 15:14:19 -04006037
6038 status = i40e_get_link_status(&pf->hw, &new_link);
6039 if (status) {
6040 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
6041 status);
6042 return;
6043 }
6044
Catherine Sullivanfef59dd2014-12-11 07:06:33 +00006045 old_link_speed = pf->hw.phy.link_info_old.link_speed;
6046 new_link_speed = pf->hw.phy.link_info.link_speed;
Jesse Brandeburg1e701e02014-09-13 07:40:42 +00006047
6048 if (new_link == old_link &&
Catherine Sullivanfef59dd2014-12-11 07:06:33 +00006049 new_link_speed == old_link_speed &&
Mitch Williams320684c2014-10-17 03:14:43 +00006050 (test_bit(__I40E_DOWN, &vsi->state) ||
6051 new_link == netif_carrier_ok(vsi->netdev)))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006052 return;
Mitch Williams320684c2014-10-17 03:14:43 +00006053
6054 if (!test_bit(__I40E_DOWN, &vsi->state))
6055 i40e_print_link_message(vsi, new_link);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006056
6057 /* Notify the base of the switch tree connected to
6058 * the link. Floating VEBs are not notified.
6059 */
6060 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
6061 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
6062 else
Mitch Williams320684c2014-10-17 03:14:43 +00006063 i40e_vsi_link_event(vsi, new_link);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006064
6065 if (pf->vf)
6066 i40e_vc_notify_link_state(pf);
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00006067
6068 if (pf->flags & I40E_FLAG_PTP)
6069 i40e_ptp_set_increment(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006070}
6071
6072/**
Shannon Nelson21536712014-10-25 10:35:25 +00006073 * i40e_watchdog_subtask - periodic checks not using event driven response
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006074 * @pf: board private structure
6075 **/
6076static void i40e_watchdog_subtask(struct i40e_pf *pf)
6077{
6078 int i;
6079
6080 /* if interface is down do nothing */
6081 if (test_bit(__I40E_DOWN, &pf->state) ||
6082 test_bit(__I40E_CONFIG_BUSY, &pf->state))
6083 return;
6084
Shannon Nelson21536712014-10-25 10:35:25 +00006085 /* make sure we don't do these things too often */
6086 if (time_before(jiffies, (pf->service_timer_previous +
6087 pf->service_timer_period)))
6088 return;
6089 pf->service_timer_previous = jiffies;
6090
Shannon Nelson9ac77262015-08-27 11:42:40 -04006091 if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
6092 i40e_link_event(pf);
Shannon Nelson21536712014-10-25 10:35:25 +00006093
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006094 /* Update the stats for active netdevs so the network stack
6095 * can look at updated numbers whenever it cares to
6096 */
Mitch Williams505682c2014-05-20 08:01:37 +00006097 for (i = 0; i < pf->num_alloc_vsi; i++)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006098 if (pf->vsi[i] && pf->vsi[i]->netdev)
6099 i40e_update_stats(pf->vsi[i]);
6100
Anjali Singhai Jaind1a8d272015-07-23 16:54:40 -04006101 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
6102 /* Update the stats for the active switching components */
6103 for (i = 0; i < I40E_MAX_VEB; i++)
6104 if (pf->veb[i])
6105 i40e_update_veb_stats(pf->veb[i]);
6106 }
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00006107
6108 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006109}
6110
6111/**
6112 * i40e_reset_subtask - Set up for resetting the device and driver
6113 * @pf: board private structure
6114 **/
6115static void i40e_reset_subtask(struct i40e_pf *pf)
6116{
6117 u32 reset_flags = 0;
6118
Anjali Singhai Jain233261862013-11-26 10:49:22 +00006119 rtnl_lock();
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006120 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08006121 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006122 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
6123 }
6124 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08006125 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006126 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6127 }
6128 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08006129 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006130 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
6131 }
6132 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08006133 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006134 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
6135 }
Neerav Parikhb5d06f02014-06-03 23:50:17 +00006136 if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -08006137 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
Neerav Parikhb5d06f02014-06-03 23:50:17 +00006138 clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
6139 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006140
6141 /* If there's a recovery already waiting, it takes
 6142	 * precedence over starting a new reset sequence.
6143 */
6144 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
6145 i40e_handle_reset_warning(pf);
Anjali Singhai Jain233261862013-11-26 10:49:22 +00006146 goto unlock;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006147 }
6148
6149 /* If we're already down or resetting, just bail */
6150 if (reset_flags &&
6151 !test_bit(__I40E_DOWN, &pf->state) &&
6152 !test_bit(__I40E_CONFIG_BUSY, &pf->state))
6153 i40e_do_reset(pf, reset_flags);
Anjali Singhai Jain233261862013-11-26 10:49:22 +00006154
6155unlock:
6156 rtnl_unlock();
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006157}
6158
6159/**
6160 * i40e_handle_link_event - Handle link event
6161 * @pf: board private structure
6162 * @e: event info posted on ARQ
6163 **/
6164static void i40e_handle_link_event(struct i40e_pf *pf,
6165 struct i40e_arq_event_info *e)
6166{
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006167 struct i40e_aqc_get_link_status *status =
6168 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006169
Jesse Brandeburg1e701e02014-09-13 07:40:42 +00006170 /* Do a new status request to re-enable LSE reporting
6171 * and load new status information into the hw struct
6172 * This completely ignores any state information
6173 * in the ARQ event info, instead choosing to always
6174 * issue the AQ update link status command.
6175 */
6176 i40e_link_event(pf);
6177
Carolyn Wyborny7b592f62014-07-10 07:58:19 +00006178 /* check for unqualified module, if link is down */
6179 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
6180 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
6181 (!(status->link_info & I40E_AQ_LINK_UP)))
6182 dev_err(&pf->pdev->dev,
6183 "The driver failed to link because an unqualified module was detected.\n");
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006184}
6185
6186/**
6187 * i40e_clean_adminq_subtask - Clean the AdminQ rings
6188 * @pf: board private structure
6189 **/
6190static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
6191{
6192 struct i40e_arq_event_info event;
6193 struct i40e_hw *hw = &pf->hw;
6194 u16 pending, i = 0;
6195 i40e_status ret;
6196 u16 opcode;
Shannon Nelson86df2422014-05-20 08:01:35 +00006197 u32 oldval;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006198 u32 val;
6199
Anjali Singhai Jaina316f652014-07-12 07:28:25 +00006200 /* Do not run clean AQ when PF reset fails */
6201 if (test_bit(__I40E_RESET_FAILED, &pf->state))
6202 return;
6203
Shannon Nelson86df2422014-05-20 08:01:35 +00006204 /* check for error indications */
6205 val = rd32(&pf->hw, pf->hw.aq.arq.len);
6206 oldval = val;
6207 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
Mitch Williams75eb73c2015-11-19 11:34:21 -08006208 if (hw->debug_mask & I40E_DEBUG_AQ)
6209 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
Shannon Nelson86df2422014-05-20 08:01:35 +00006210 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
6211 }
6212 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
Mitch Williams75eb73c2015-11-19 11:34:21 -08006213 if (hw->debug_mask & I40E_DEBUG_AQ)
6214 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
Shannon Nelson86df2422014-05-20 08:01:35 +00006215 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
Mitch Williams1d0a4ad2015-12-23 12:05:48 -08006216 pf->arq_overflows++;
Shannon Nelson86df2422014-05-20 08:01:35 +00006217 }
6218 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
Mitch Williams75eb73c2015-11-19 11:34:21 -08006219 if (hw->debug_mask & I40E_DEBUG_AQ)
6220 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
Shannon Nelson86df2422014-05-20 08:01:35 +00006221 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
6222 }
6223 if (oldval != val)
6224 wr32(&pf->hw, pf->hw.aq.arq.len, val);
6225
6226 val = rd32(&pf->hw, pf->hw.aq.asq.len);
6227 oldval = val;
6228 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
Mitch Williams75eb73c2015-11-19 11:34:21 -08006229 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6230 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
Shannon Nelson86df2422014-05-20 08:01:35 +00006231 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
6232 }
6233 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
Mitch Williams75eb73c2015-11-19 11:34:21 -08006234 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6235 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
Shannon Nelson86df2422014-05-20 08:01:35 +00006236 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
6237 }
6238 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
Mitch Williams75eb73c2015-11-19 11:34:21 -08006239 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6240 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
Shannon Nelson86df2422014-05-20 08:01:35 +00006241 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
6242 }
6243 if (oldval != val)
6244 wr32(&pf->hw, pf->hw.aq.asq.len, val);
6245
Mitch Williams1001dc32014-11-11 20:02:19 +00006246 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
6247 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006248 if (!event.msg_buf)
6249 return;
6250
6251 do {
6252 ret = i40e_clean_arq_element(hw, &event, &pending);
Mitch Williams56497972014-06-04 08:45:18 +00006253 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006254 break;
Mitch Williams56497972014-06-04 08:45:18 +00006255 else if (ret) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006256 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
6257 break;
6258 }
6259
6260 opcode = le16_to_cpu(event.desc.opcode);
6261 switch (opcode) {
6262
6263 case i40e_aqc_opc_get_link_status:
6264 i40e_handle_link_event(pf, &event);
6265 break;
6266 case i40e_aqc_opc_send_msg_to_pf:
6267 ret = i40e_vc_process_vf_msg(pf,
6268 le16_to_cpu(event.desc.retval),
6269 le32_to_cpu(event.desc.cookie_high),
6270 le32_to_cpu(event.desc.cookie_low),
6271 event.msg_buf,
Mitch Williams1001dc32014-11-11 20:02:19 +00006272 event.msg_len);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006273 break;
6274 case i40e_aqc_opc_lldp_update_mib:
Jesse Brandeburg69bfb112014-02-11 08:24:13 +00006275 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08006276#ifdef CONFIG_I40E_DCB
6277 rtnl_lock();
6278 ret = i40e_handle_lldp_event(pf, &event);
6279 rtnl_unlock();
6280#endif /* CONFIG_I40E_DCB */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006281 break;
6282 case i40e_aqc_opc_event_lan_overflow:
Jesse Brandeburg69bfb112014-02-11 08:24:13 +00006283 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006284 i40e_handle_lan_overflow_event(pf, &event);
6285 break;
Shannon Nelson0467bc92013-12-18 13:45:58 +00006286 case i40e_aqc_opc_send_msg_to_peer:
6287 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
6288 break;
Shannon Nelson91a0f932015-03-19 14:32:01 -07006289 case i40e_aqc_opc_nvm_erase:
6290 case i40e_aqc_opc_nvm_update:
Michal Kosiarz00ada502015-11-19 11:34:20 -08006291 case i40e_aqc_opc_oem_post_update:
Shannon Nelson6e93d0c2016-01-15 14:33:18 -08006292 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
6293 "ARQ NVM operation 0x%04x completed\n",
6294 opcode);
Shannon Nelson91a0f932015-03-19 14:32:01 -07006295 break;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006296 default:
6297 dev_info(&pf->pdev->dev,
Shannon Nelson56e5ca62016-03-10 14:59:48 -08006298 "ARQ: Unknown event 0x%04x ignored\n",
Shannon Nelson0467bc92013-12-18 13:45:58 +00006299 opcode);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006300 break;
6301 }
6302 } while (pending && (i++ < pf->adminq_work_limit));
6303
6304 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
6305 /* re-enable Admin queue interrupt cause */
6306 val = rd32(hw, I40E_PFINT_ICR0_ENA);
6307 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
6308 wr32(hw, I40E_PFINT_ICR0_ENA, val);
6309 i40e_flush(hw);
6310
6311 kfree(event.msg_buf);
6312}
6313
6314/**
Shannon Nelson4eb3f762014-03-06 08:59:58 +00006315 * i40e_verify_eeprom - make sure eeprom is good to use
6316 * @pf: board private structure
6317 **/
6318static void i40e_verify_eeprom(struct i40e_pf *pf)
6319{
6320 int err;
6321
6322 err = i40e_diag_eeprom_test(&pf->hw);
6323 if (err) {
6324 /* retry in case of garbage read */
6325 err = i40e_diag_eeprom_test(&pf->hw);
6326 if (err) {
6327 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6328 err);
6329 set_bit(__I40E_BAD_EEPROM, &pf->state);
6330 }
6331 }
6332
6333 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
6334 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6335 clear_bit(__I40E_BAD_EEPROM, &pf->state);
6336 }
6337}
6338
6339/**
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006340 * i40e_enable_pf_switch_lb
Jeff Kirsherb40c82e62015-02-27 09:18:34 +00006341 * @pf: pointer to the PF structure
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006342 *
6343 * enable switch loop back or die - no point in a return value
6344 **/
6345static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6346{
6347 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6348 struct i40e_vsi_context ctxt;
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006349 int ret;
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006350
6351 ctxt.seid = pf->main_vsi_seid;
6352 ctxt.pf_num = pf->hw.pf_id;
6353 ctxt.vf_num = 0;
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006354 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6355 if (ret) {
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006356 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006357 "couldn't get PF vsi config, err %s aq_err %s\n",
6358 i40e_stat_str(&pf->hw, ret),
6359 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006360 return;
6361 }
6362 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6363 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6364 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6365
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006366 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6367 if (ret) {
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006368 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006369 "update vsi switch failed, err %s aq_err %s\n",
6370 i40e_stat_str(&pf->hw, ret),
6371 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006372 }
6373}
6374
6375/**
6376 * i40e_disable_pf_switch_lb
Jeff Kirsherb40c82e62015-02-27 09:18:34 +00006377 * @pf: pointer to the PF structure
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006378 *
6379 * disable switch loop back or die - no point in a return value
6380 **/
6381static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6382{
6383 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6384 struct i40e_vsi_context ctxt;
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006385 int ret;
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006386
6387 ctxt.seid = pf->main_vsi_seid;
6388 ctxt.pf_num = pf->hw.pf_id;
6389 ctxt.vf_num = 0;
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006390 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6391 if (ret) {
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006392 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006393 "couldn't get PF vsi config, err %s aq_err %s\n",
6394 i40e_stat_str(&pf->hw, ret),
6395 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006396 return;
6397 }
6398 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6399 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6400 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6401
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006402 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6403 if (ret) {
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006404 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006405 "update vsi switch failed, err %s aq_err %s\n",
6406 i40e_stat_str(&pf->hw, ret),
6407 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Akeem G Abodunrin386a0af2015-02-27 09:15:25 +00006408 }
6409}
6410
6411/**
Neerav Parikh51616012015-02-06 08:52:14 +00006412 * i40e_config_bridge_mode - Configure the HW bridge mode
6413 * @veb: pointer to the bridge instance
6414 *
6415 * Configure the loop back mode for the LAN VSI that is downlink to the
6416 * specified HW bridge instance. It is expected this function is called
6417 * when a new HW bridge is instantiated.
6418 **/
6419static void i40e_config_bridge_mode(struct i40e_veb *veb)
6420{
6421 struct i40e_pf *pf = veb->pf;
6422
Shannon Nelson6dec1012015-09-28 14:12:30 -04006423 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
6424 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6425 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
Neerav Parikh51616012015-02-06 08:52:14 +00006426 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6427 i40e_disable_pf_switch_lb(pf);
6428 else
6429 i40e_enable_pf_switch_lb(pf);
6430}
6431
6432/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006433 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
6434 * @veb: pointer to the VEB instance
6435 *
6436 * This is a recursive function that first builds the attached VSIs then
 6437 * recurses to build the next layer of VEBs. We track the connections
 6438 * through our own index numbers because the SEIDs from the HW could
6439 * change across the reset.
6440 **/
6441static int i40e_reconstitute_veb(struct i40e_veb *veb)
6442{
6443 struct i40e_vsi *ctl_vsi = NULL;
6444 struct i40e_pf *pf = veb->pf;
6445 int v, veb_idx;
6446 int ret;
6447
6448 /* build VSI that owns this VEB, temporarily attached to base VEB */
Mitch Williams505682c2014-05-20 08:01:37 +00006449 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006450 if (pf->vsi[v] &&
6451 pf->vsi[v]->veb_idx == veb->idx &&
6452 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
6453 ctl_vsi = pf->vsi[v];
6454 break;
6455 }
6456 }
6457 if (!ctl_vsi) {
6458 dev_info(&pf->pdev->dev,
6459 "missing owner VSI for veb_idx %d\n", veb->idx);
6460 ret = -ENOENT;
6461 goto end_reconstitute;
6462 }
6463 if (ctl_vsi != pf->vsi[pf->lan_vsi])
6464 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6465 ret = i40e_add_vsi(ctl_vsi);
6466 if (ret) {
6467 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006468 "rebuild of veb_idx %d owner VSI failed: %d\n",
6469 veb->idx, ret);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006470 goto end_reconstitute;
6471 }
6472 i40e_vsi_reset_stats(ctl_vsi);
6473
6474 /* create the VEB in the switch and move the VSI onto the VEB */
6475 ret = i40e_add_veb(veb, ctl_vsi);
6476 if (ret)
6477 goto end_reconstitute;
6478
Anjali Singhai Jainfc608612015-05-08 15:35:57 -07006479 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6480 veb->bridge_mode = BRIDGE_MODE_VEB;
6481 else
6482 veb->bridge_mode = BRIDGE_MODE_VEPA;
Neerav Parikh51616012015-02-06 08:52:14 +00006483 i40e_config_bridge_mode(veb);
Anjali Singhai Jainb64ba082014-11-13 03:06:15 +00006484
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006485 /* create the remaining VSIs attached to this VEB */
Mitch Williams505682c2014-05-20 08:01:37 +00006486 for (v = 0; v < pf->num_alloc_vsi; v++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006487 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
6488 continue;
6489
6490 if (pf->vsi[v]->veb_idx == veb->idx) {
6491 struct i40e_vsi *vsi = pf->vsi[v];
Jesse Brandeburg6995b362015-08-28 17:55:54 -04006492
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006493 vsi->uplink_seid = veb->seid;
6494 ret = i40e_add_vsi(vsi);
6495 if (ret) {
6496 dev_info(&pf->pdev->dev,
6497 "rebuild of vsi_idx %d failed: %d\n",
6498 v, ret);
6499 goto end_reconstitute;
6500 }
6501 i40e_vsi_reset_stats(vsi);
6502 }
6503 }
6504
6505 /* create any VEBs attached to this VEB - RECURSION */
6506 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6507 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
6508 pf->veb[veb_idx]->uplink_seid = veb->seid;
6509 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
6510 if (ret)
6511 break;
6512 }
6513 }
6514
6515end_reconstitute:
6516 return ret;
6517}
6518
6519/**
6520 * i40e_get_capabilities - get info about the HW
6521 * @pf: the PF struct
6522 **/
6523static int i40e_get_capabilities(struct i40e_pf *pf)
6524{
6525 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
6526 u16 data_size;
6527 int buf_len;
6528 int err;
6529
6530 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
6531 do {
6532 cap_buf = kzalloc(buf_len, GFP_KERNEL);
6533 if (!cap_buf)
6534 return -ENOMEM;
6535
6536 /* this loads the data into the hw struct for us */
6537 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
6538 &data_size,
6539 i40e_aqc_opc_list_func_capabilities,
6540 NULL);
6541 /* data loaded, buffer no longer needed */
6542 kfree(cap_buf);
6543
6544 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
6545 /* retry with a larger buffer */
6546 buf_len = data_size;
6547 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
6548 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006549 "capability discovery failed, err %s aq_err %s\n",
6550 i40e_stat_str(&pf->hw, err),
6551 i40e_aq_str(&pf->hw,
6552 pf->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006553 return -ENODEV;
6554 }
6555 } while (err);
6556
6557 if (pf->hw.debug_mask & I40E_DEBUG_USER)
6558 dev_info(&pf->pdev->dev,
6559 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
6560 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
6561 pf->hw.func_caps.num_msix_vectors,
6562 pf->hw.func_caps.num_msix_vectors_vf,
6563 pf->hw.func_caps.fd_filters_guaranteed,
6564 pf->hw.func_caps.fd_filters_best_effort,
6565 pf->hw.func_caps.num_tx_qp,
6566 pf->hw.func_caps.num_vsis);
6567
Jesse Brandeburg7134f9c2013-11-26 08:56:05 +00006568#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
6569 + pf->hw.func_caps.num_vfs)
6570 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
6571 dev_info(&pf->pdev->dev,
6572 "got num_vsis %d, setting num_vsis to %d\n",
6573 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
6574 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
6575 }
6576
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006577 return 0;
6578}
6579
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006580static int i40e_vsi_clear(struct i40e_vsi *vsi);
6581
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006582/**
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006583 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006584 * @pf: board private structure
6585 **/
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006586static void i40e_fdir_sb_setup(struct i40e_pf *pf)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006587{
6588 struct i40e_vsi *vsi;
Shannon Nelson8a9eb7d2014-03-14 07:32:28 +00006589 int i;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006590
Jesse Brandeburg407e0632014-06-03 23:50:12 +00006591 /* quick workaround for an NVM issue that leaves a critical register
6592 * uninitialized
6593 */
6594 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6595 static const u32 hkey[] = {
6596 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6597 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6598 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6599 0x95b3a76d};
6600
6601 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6602 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6603 }
6604
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006605 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006606 return;
6607
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006608 /* find existing VSI and see if it needs configuring */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006609 vsi = NULL;
Mitch Williams505682c2014-05-20 08:01:37 +00006610 for (i = 0; i < pf->num_alloc_vsi; i++) {
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006611 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006612 vsi = pf->vsi[i];
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006613 break;
6614 }
6615 }
6616
6617 /* create a new VSI if none exists */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006618 if (!vsi) {
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006619 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6620 pf->vsi[pf->lan_vsi]->seid, 0);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006621 if (!vsi) {
6622 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
Shannon Nelson8a9eb7d2014-03-14 07:32:28 +00006623 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6624 return;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006625 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006626 }
Shannon Nelson8a9eb7d2014-03-14 07:32:28 +00006627
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08006628 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006629}
6630
6631/**
6632 * i40e_fdir_teardown - release the Flow Director resources
6633 * @pf: board private structure
6634 **/
6635static void i40e_fdir_teardown(struct i40e_pf *pf)
6636{
6637 int i;
6638
Joseph Gasparakis17a73f62014-02-12 01:45:30 +00006639 i40e_fdir_filter_exit(pf);
Mitch Williams505682c2014-05-20 08:01:37 +00006640 for (i = 0; i < pf->num_alloc_vsi; i++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006641 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6642 i40e_vsi_release(pf->vsi[i]);
6643 break;
6644 }
6645 }
6646}
6647
6648/**
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00006649 * i40e_prep_for_reset - prep for the core to reset
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006650 * @pf: board private structure
6651 *
Jeff Kirsherb40c82e62015-02-27 09:18:34 +00006652 * Close up the VFs and other things in prep for PF Reset.
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00006653 **/
Shannon Nelson23cfbe02014-06-04 01:23:14 +00006654static void i40e_prep_for_reset(struct i40e_pf *pf)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006655{
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006656 struct i40e_hw *hw = &pf->hw;
Shannon Nelson60442de2014-04-23 04:50:13 +00006657 i40e_status ret = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006658 u32 v;
6659
6660 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
6661 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
Shannon Nelson23cfbe02014-06-04 01:23:14 +00006662 return;
Mitch Williamsd3ce57342016-03-10 14:59:46 -08006663 if (i40e_check_asq_alive(&pf->hw))
6664 i40e_vc_notify_reset(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006665
Jesse Brandeburg69bfb112014-02-11 08:24:13 +00006666 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006667
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006668 /* quiesce the VSIs and their queues that are not already DOWN */
6669 i40e_pf_quiesce_all_vsi(pf);
6670
Mitch Williams505682c2014-05-20 08:01:37 +00006671 for (v = 0; v < pf->num_alloc_vsi; v++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006672 if (pf->vsi[v])
6673 pf->vsi[v]->seid = 0;
6674 }
6675
6676 i40e_shutdown_adminq(&pf->hw);
6677
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00006678 /* call shutdown HMC */
Shannon Nelson60442de2014-04-23 04:50:13 +00006679 if (hw->hmc.hmc_obj) {
6680 ret = i40e_shutdown_lan_hmc(hw);
Shannon Nelson23cfbe02014-06-04 01:23:14 +00006681 if (ret)
Shannon Nelson60442de2014-04-23 04:50:13 +00006682 dev_warn(&pf->pdev->dev,
6683 "shutdown_lan_hmc failed: %d\n", ret);
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00006684 }
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00006685}
6686
6687/**
Jesse Brandeburg44033fa2014-04-23 04:50:15 +00006688 * i40e_send_version - update firmware with driver version
6689 * @pf: PF struct
6690 */
6691static void i40e_send_version(struct i40e_pf *pf)
6692{
6693 struct i40e_driver_version dv;
6694
6695 dv.major_version = DRV_VERSION_MAJOR;
6696 dv.minor_version = DRV_VERSION_MINOR;
6697 dv.build_version = DRV_VERSION_BUILD;
6698 dv.subbuild_version = 0;
Rickard Strandqvist35a7d802014-07-29 09:26:25 +00006699 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
Jesse Brandeburg44033fa2014-04-23 04:50:15 +00006700 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6701}
6702
6703/**
Jesse Brandeburg4dda12e2013-12-18 13:46:01 +00006704 * i40e_reset_and_rebuild - reset and rebuild using a saved config
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00006705 * @pf: board private structure
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00006706 * @reinit: if the Main VSI needs to be re-initialized.
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00006707 **/
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00006708static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00006709{
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00006710 struct i40e_hw *hw = &pf->hw;
Anjali Singhai Jaincafa2ee2014-09-13 07:40:45 +00006711 u8 set_fc_aq_fail = 0;
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00006712 i40e_status ret;
Anjali Singhai Jain4f2f017c2015-10-21 19:47:07 -04006713 u32 val;
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00006714 u32 v;
6715
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006716 /* Now we wait for GRST to settle out.
6717 * We don't have to delete the VEBs or VSIs from the hw switch
6718 * because the reset will make them disappear.
6719 */
6720 ret = i40e_pf_reset(hw);
Akeem G Abodunrinb5565402014-04-09 05:59:04 +00006721 if (ret) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006722 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
Anjali Singhai Jaina316f652014-07-12 07:28:25 +00006723 set_bit(__I40E_RESET_FAILED, &pf->state);
6724 goto clear_recovery;
Akeem G Abodunrinb5565402014-04-09 05:59:04 +00006725 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006726 pf->pfr_count++;
6727
6728 if (test_bit(__I40E_DOWN, &pf->state))
Anjali Singhai Jaina316f652014-07-12 07:28:25 +00006729 goto clear_recovery;
Jesse Brandeburg69bfb112014-02-11 08:24:13 +00006730 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006731
6732 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
6733 ret = i40e_init_adminq(&pf->hw);
6734 if (ret) {
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006735 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
6736 i40e_stat_str(&pf->hw, ret),
6737 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Anjali Singhai Jaina316f652014-07-12 07:28:25 +00006738 goto clear_recovery;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006739 }
6740
Shannon Nelson4eb3f762014-03-06 08:59:58 +00006741 /* re-verify the eeprom if we just had an EMP reset */
Anjali Singhai Jain9df42d12015-01-24 09:58:40 +00006742 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
Shannon Nelson4eb3f762014-03-06 08:59:58 +00006743 i40e_verify_eeprom(pf);
Shannon Nelson4eb3f762014-03-06 08:59:58 +00006744
Shannon Nelsone78ac4bf2014-05-10 04:49:09 +00006745 i40e_clear_pxe_mode(hw);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006746 ret = i40e_get_capabilities(pf);
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006747 if (ret)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006748 goto end_core_reset;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006749
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006750 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6751 hw->func_caps.num_rx_qp,
6752 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
6753 if (ret) {
6754 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6755 goto end_core_reset;
6756 }
6757 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6758 if (ret) {
6759 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6760 goto end_core_reset;
6761 }
6762
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08006763#ifdef CONFIG_I40E_DCB
6764 ret = i40e_init_pf_dcb(pf);
6765 if (ret) {
Shannon Nelsonaebfc812014-12-11 07:06:38 +00006766 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6767 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6768 /* Continue without DCB enabled */
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08006769 }
6770#endif /* CONFIG_I40E_DCB */
Vasu Dev38e00432014-08-01 13:27:03 -07006771#ifdef I40E_FCOE
Shannon Nelson21364bc2015-08-26 15:14:13 -04006772 i40e_init_pf_fcoe(pf);
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08006773
Vasu Dev38e00432014-08-01 13:27:03 -07006774#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006775 /* do basic switch setup */
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00006776 ret = i40e_setup_pf_switch(pf, reinit);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006777 if (ret)
6778 goto end_core_reset;
6779
Shannon Nelson2f0aff42016-01-04 10:33:08 -08006780 /* The driver only wants link up/down and module qualification
6781 * reports from firmware. Note the negative logic.
Jesse Brandeburg7e2453f2014-09-13 07:40:41 +00006782 */
6783 ret = i40e_aq_set_phy_int_mask(&pf->hw,
Shannon Nelson2f0aff42016-01-04 10:33:08 -08006784 ~(I40E_AQ_EVENT_LINK_UPDOWN |
Shannon Nelson867a79e2016-03-18 12:18:15 -07006785 I40E_AQ_EVENT_MEDIA_NA |
Shannon Nelson2f0aff42016-01-04 10:33:08 -08006786 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
Jesse Brandeburg7e2453f2014-09-13 07:40:41 +00006787 if (ret)
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006788 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
6789 i40e_stat_str(&pf->hw, ret),
6790 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Jesse Brandeburg7e2453f2014-09-13 07:40:41 +00006791
Anjali Singhai Jaincafa2ee2014-09-13 07:40:45 +00006792 /* make sure our flow control settings are restored */
6793 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
6794 if (ret)
Neerav Parikh8279e492015-09-03 17:18:50 -04006795 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
6796 i40e_stat_str(&pf->hw, ret),
6797 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Anjali Singhai Jaincafa2ee2014-09-13 07:40:45 +00006798
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006799 /* Rebuild the VSIs and VEBs that existed before reset.
6800 * They are still in our local switch element arrays, so only
6801 * need to rebuild the switch model in the HW.
6802 *
6803	 * If there were VEBs but the reconstitution failed, we'll try
6804	 * to recover minimal use by getting the basic PF VSI working.
6805 */
6806 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
Jesse Brandeburg69bfb112014-02-11 08:24:13 +00006807 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006808 /* find the one VEB connected to the MAC, and find orphans */
6809 for (v = 0; v < I40E_MAX_VEB; v++) {
6810 if (!pf->veb[v])
6811 continue;
6812
6813 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
6814 pf->veb[v]->uplink_seid == 0) {
6815 ret = i40e_reconstitute_veb(pf->veb[v]);
6816
6817 if (!ret)
6818 continue;
6819
6820 /* If Main VEB failed, we're in deep doodoo,
6821 * so give up rebuilding the switch and set up
6822 * for minimal rebuild of PF VSI.
6823 * If orphan failed, we'll report the error
6824 * but try to keep going.
6825 */
6826 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
6827 dev_info(&pf->pdev->dev,
6828 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
6829 ret);
6830 pf->vsi[pf->lan_vsi]->uplink_seid
6831 = pf->mac_seid;
6832 break;
6833 } else if (pf->veb[v]->uplink_seid == 0) {
6834 dev_info(&pf->pdev->dev,
6835 "rebuild of orphan VEB failed: %d\n",
6836 ret);
6837 }
6838 }
6839 }
6840 }
6841
6842 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
Shannon Nelsoncde4cbc2014-06-04 01:23:17 +00006843 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006844 /* no VEB, so rebuild only the Main VSI */
6845 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
6846 if (ret) {
6847 dev_info(&pf->pdev->dev,
6848 "rebuild of Main VSI failed: %d\n", ret);
6849 goto end_core_reset;
6850 }
6851 }
6852
Anjali Singhai Jain4f2f017c2015-10-21 19:47:07 -04006853 /* Reconfigure hardware for allowing smaller MSS in the case
6854 * of TSO, so that we avoid the MDD being fired and causing
6855 * a reset in the case of small MSS+TSO.
6856 */
6857#define I40E_REG_MSS 0x000E64DC
6858#define I40E_REG_MSS_MIN_MASK 0x3FF0000
6859#define I40E_64BYTE_MSS 0x400000
6860 val = rd32(hw, I40E_REG_MSS);
6861 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
6862 val &= ~I40E_REG_MSS_MIN_MASK;
6863 val |= I40E_64BYTE_MSS;
6864 wr32(hw, I40E_REG_MSS, val);
6865 }
6866
Anjali Singhai Jain8eed76f2015-12-09 15:50:31 -08006867 if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
Anjali Singhai Jain025b4a52015-02-24 06:58:46 +00006868 msleep(75);
6869 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
6870 if (ret)
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04006871 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
6872 i40e_stat_str(&pf->hw, ret),
6873 i40e_aq_str(&pf->hw,
6874 pf->hw.aq.asq_last_status));
Anjali Singhai Jaincafa2ee2014-09-13 07:40:45 +00006875 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006876 /* reinit the misc interrupt */
6877 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6878 ret = i40e_setup_misc_vector(pf);
6879
Anjali Singhai Jaine7358f52015-10-01 14:37:34 -04006880 /* Add a filter to drop all Flow control frames from any VSI from being
6881 * transmitted. By doing so we stop a malicious VF from sending out
6882 * PAUSE or PFC frames and potentially controlling traffic for other
6883 * PF/VF VSIs.
6884 * The FW can still send Flow control frames if enabled.
6885 */
6886 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
6887 pf->main_vsi_seid);
6888
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006889 /* restart the VSIs that were rebuilt and running before the reset */
6890 i40e_pf_unquiesce_all_vsi(pf);
6891
Mitch Williams69f64b22014-02-13 03:48:46 -08006892 if (pf->num_alloc_vfs) {
6893 for (v = 0; v < pf->num_alloc_vfs; v++)
6894 i40e_reset_vf(&pf->vf[v], true);
6895 }
6896
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006897 /* tell the firmware that we're starting */
Jesse Brandeburg44033fa2014-04-23 04:50:15 +00006898 i40e_send_version(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006899
6900end_core_reset:
Anjali Singhai Jaina316f652014-07-12 07:28:25 +00006901 clear_bit(__I40E_RESET_FAILED, &pf->state);
6902clear_recovery:
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006903 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
6904}
6905
6906/**
Jeff Kirsherb40c82e62015-02-27 09:18:34 +00006907 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00006908 * @pf: board private structure
6909 *
6910 * Close up the VFs and other things in prep for a Core Reset,
6911 * then get ready to rebuild the world.
6912 **/
6913static void i40e_handle_reset_warning(struct i40e_pf *pf)
6914{
Shannon Nelson23cfbe02014-06-04 01:23:14 +00006915 i40e_prep_for_reset(pf);
6916 i40e_reset_and_rebuild(pf, false);
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00006917}
6918
6919/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006920 * i40e_handle_mdd_event
Jeff Kirsherb40c82e62015-02-27 09:18:34 +00006921 * @pf: pointer to the PF structure
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006922 *
6923 * Called from the MDD irq handler to identify possibly malicious VFs
6924 **/
6925static void i40e_handle_mdd_event(struct i40e_pf *pf)
6926{
6927 struct i40e_hw *hw = &pf->hw;
6928 bool mdd_detected = false;
Neerav Parikhdf430b12014-06-04 01:23:15 +00006929 bool pf_mdd_detected = false;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006930 struct i40e_vf *vf;
6931 u32 reg;
6932 int i;
6933
6934 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
6935 return;
6936
6937 /* find what triggered the MDD event */
6938 reg = rd32(hw, I40E_GL_MDET_TX);
6939 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
Anjali Singhai Jain4c33f832014-06-05 00:18:21 +00006940 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6941 I40E_GL_MDET_TX_PF_NUM_SHIFT;
Mitch Williams2089ad02014-10-17 03:14:53 +00006942 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
Anjali Singhai Jain4c33f832014-06-05 00:18:21 +00006943 I40E_GL_MDET_TX_VF_NUM_SHIFT;
Dan Carpenter013f6572014-10-22 20:06:29 -07006944 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
Anjali Singhai Jain4c33f832014-06-05 00:18:21 +00006945 I40E_GL_MDET_TX_EVENT_SHIFT;
Mitch Williams2089ad02014-10-17 03:14:53 +00006946 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6947 I40E_GL_MDET_TX_QUEUE_SHIFT) -
6948 pf->hw.func_caps.base_queue;
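		/* the MDET register reports an absolute queue number;
		 * base_queue was subtracted above to get the PF-relative
		 * queue index
		 */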
Jesse Brandeburgfaf32972014-07-12 07:28:21 +00006949 if (netif_msg_tx_err(pf))
Jeff Kirsherb40c82e62015-02-27 09:18:34 +00006950 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
Jesse Brandeburgfaf32972014-07-12 07:28:21 +00006951 event, queue, pf_num, vf_num);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006952 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
6953 mdd_detected = true;
6954 }
6955 reg = rd32(hw, I40E_GL_MDET_RX);
6956 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
Anjali Singhai Jain4c33f832014-06-05 00:18:21 +00006957 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6958 I40E_GL_MDET_RX_FUNCTION_SHIFT;
Dan Carpenter013f6572014-10-22 20:06:29 -07006959 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
Anjali Singhai Jain4c33f832014-06-05 00:18:21 +00006960 I40E_GL_MDET_RX_EVENT_SHIFT;
Mitch Williams2089ad02014-10-17 03:14:53 +00006961 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6962 I40E_GL_MDET_RX_QUEUE_SHIFT) -
6963 pf->hw.func_caps.base_queue;
Jesse Brandeburgfaf32972014-07-12 07:28:21 +00006964 if (netif_msg_rx_err(pf))
6965 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
6966 event, queue, func);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006967 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
6968 mdd_detected = true;
6969 }
6970
Neerav Parikhdf430b12014-06-04 01:23:15 +00006971 if (mdd_detected) {
6972 reg = rd32(hw, I40E_PF_MDET_TX);
6973 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6974 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
Jesse Brandeburgfaf32972014-07-12 07:28:21 +00006975 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
Neerav Parikhdf430b12014-06-04 01:23:15 +00006976 pf_mdd_detected = true;
6977 }
6978 reg = rd32(hw, I40E_PF_MDET_RX);
6979 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6980 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
Jesse Brandeburgfaf32972014-07-12 07:28:21 +00006981 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
Neerav Parikhdf430b12014-06-04 01:23:15 +00006982 pf_mdd_detected = true;
6983 }
6984 /* Queue belongs to the PF, initiate a reset */
6985 if (pf_mdd_detected) {
6986 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6987 i40e_service_event_schedule(pf);
6988 }
6989 }
6990
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00006991 /* see if one of the VFs needs its hand slapped */
6992 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
6993 vf = &(pf->vf[i]);
6994 reg = rd32(hw, I40E_VP_MDET_TX(i));
6995 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6996 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
6997 vf->num_mdd_events++;
Jesse Brandeburgfaf32972014-07-12 07:28:21 +00006998 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
6999 i);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007000 }
7001
7002 reg = rd32(hw, I40E_VP_MDET_RX(i));
7003 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
7004 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
7005 vf->num_mdd_events++;
Jesse Brandeburgfaf32972014-07-12 07:28:21 +00007006 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
7007 i);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007008 }
7009
7010 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
7011 dev_info(&pf->pdev->dev,
7012 "Too many MDD events on VF %d, disabled\n", i);
7013 dev_info(&pf->pdev->dev,
7014 "Use PF Control I/F to re-enable the VF\n");
7015 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
7016 }
7017 }
7018
7019 /* re-enable mdd interrupt cause */
7020 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
7021 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
7022 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
7023 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
7024 i40e_flush(hw);
7025}
7026
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00007027/**
Singhai, Anjali6a899022015-12-14 12:21:18 -08007028 * i40e_sync_udp_filters_subtask - Sync pending UDP tunnel ports with HW
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00007029 * @pf: board private structure
7030 **/
Singhai, Anjali6a899022015-12-14 12:21:18 -08007031static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00007032{
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00007033 struct i40e_hw *hw = &pf->hw;
7034 i40e_status ret;
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00007035 __be16 port;
7036 int i;
7037
Singhai, Anjali6a899022015-12-14 12:21:18 -08007038 if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00007039 return;
7040
Singhai, Anjali6a899022015-12-14 12:21:18 -08007041 pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00007042
7043 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
Singhai, Anjali6a899022015-12-14 12:21:18 -08007044 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
7045 pf->pending_udp_bitmap &= ~BIT_ULL(i);
7046 port = pf->udp_ports[i].index;
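			/* a non-zero port means this UDP tunnel should be
			 * added; a zero port means the slot was cleared and
			 * the tunnel filter is deleted
			 */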
Shannon Nelsonc22c06c2015-03-31 00:45:04 -07007047 if (port)
Carolyn Wybornyb3f5c7b2016-08-24 11:33:51 -07007048 ret = i40e_aq_add_udp_tunnel(hw, port,
7049 pf->udp_ports[i].type,
7050 NULL, NULL);
Shannon Nelsonc22c06c2015-03-31 00:45:04 -07007051 else
7052 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00007053
7054 if (ret) {
Carolyn Wyborny730a8f82016-02-17 16:12:16 -08007055 dev_dbg(&pf->pdev->dev,
7056 "%s %s port %d, index %d failed, err %s aq_err %s\n",
7057 pf->udp_ports[i].type ? "vxlan" : "geneve",
7058 port ? "add" : "delete",
7059 ntohs(port), i,
7060 i40e_stat_str(&pf->hw, ret),
7061 i40e_aq_str(&pf->hw,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04007062 pf->hw.aq.asq_last_status));
Singhai, Anjali6a899022015-12-14 12:21:18 -08007063 pf->udp_ports[i].index = 0;
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00007064 }
7065 }
7066 }
7067}
7068
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007069/**
7070 * i40e_service_task - Run the driver's async subtasks
7071 * @work: pointer to work_struct containing our data
7072 **/
7073static void i40e_service_task(struct work_struct *work)
7074{
7075 struct i40e_pf *pf = container_of(work,
7076 struct i40e_pf,
7077 service_task);
7078 unsigned long start_time = jiffies;
7079
Shannon Nelsone57a2fe2014-06-03 23:50:19 +00007080 /* don't bother with service tasks if a reset is in progress */
7081 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7082 i40e_service_event_complete(pf);
7083 return;
7084 }
7085
Kiran Patilb03a8c12015-09-24 18:13:15 -04007086 i40e_detect_recover_hung(pf);
Jesse Brandeburg2818ccd2016-01-13 16:51:38 -08007087 i40e_sync_filters_subtask(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007088 i40e_reset_subtask(pf);
7089 i40e_handle_mdd_event(pf);
7090 i40e_vc_process_vflr_event(pf);
7091 i40e_watchdog_subtask(pf);
7092 i40e_fdir_reinit_subtask(pf);
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06007093 i40e_client_subtask(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007094 i40e_sync_filters_subtask(pf);
Singhai, Anjali6a899022015-12-14 12:21:18 -08007095 i40e_sync_udp_filters_subtask(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007096 i40e_clean_adminq_subtask(pf);
7097
7098 i40e_service_event_complete(pf);
7099
7100 /* If the tasks have taken longer than one timer cycle or there
7101 * is more work to be done, reschedule the service task now
7102 * rather than wait for the timer to tick again.
7103 */
7104 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
7105 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
7106 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
7107 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
7108 i40e_service_event_schedule(pf);
7109}
7110
7111/**
7112 * i40e_service_timer - timer callback
7113 * @data: pointer to PF struct
7114 **/
7115static void i40e_service_timer(unsigned long data)
7116{
7117 struct i40e_pf *pf = (struct i40e_pf *)data;
7118
7119 mod_timer(&pf->service_timer,
7120 round_jiffies(jiffies + pf->service_timer_period));
7121 i40e_service_event_schedule(pf);
7122}
7123
7124/**
7125 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
7126 * @vsi: the VSI being configured
7127 **/
7128static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
7129{
7130 struct i40e_pf *pf = vsi->back;
7131
7132 switch (vsi->type) {
7133 case I40E_VSI_MAIN:
7134 vsi->alloc_queue_pairs = pf->num_lan_qps;
7135 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7136 I40E_REQ_DESCRIPTOR_MULTIPLE);
7137 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7138 vsi->num_q_vectors = pf->num_lan_msix;
7139 else
7140 vsi->num_q_vectors = 1;
7141
7142 break;
7143
7144 case I40E_VSI_FDIR:
7145 vsi->alloc_queue_pairs = 1;
7146 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
7147 I40E_REQ_DESCRIPTOR_MULTIPLE);
Tushar Davea70e4072016-05-16 12:40:53 -07007148 vsi->num_q_vectors = pf->num_fdsb_msix;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007149 break;
7150
7151 case I40E_VSI_VMDQ2:
7152 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
7153 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7154 I40E_REQ_DESCRIPTOR_MULTIPLE);
7155 vsi->num_q_vectors = pf->num_vmdq_msix;
7156 break;
7157
7158 case I40E_VSI_SRIOV:
7159 vsi->alloc_queue_pairs = pf->num_vf_qps;
7160 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7161 I40E_REQ_DESCRIPTOR_MULTIPLE);
7162 break;
7163
Vasu Dev38e00432014-08-01 13:27:03 -07007164#ifdef I40E_FCOE
7165 case I40E_VSI_FCOE:
7166 vsi->alloc_queue_pairs = pf->num_fcoe_qps;
7167 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7168 I40E_REQ_DESCRIPTOR_MULTIPLE);
7169 vsi->num_q_vectors = pf->num_fcoe_msix;
7170 break;
7171
7172#endif /* I40E_FCOE */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007173 default:
7174 WARN_ON(1);
7175 return -ENODATA;
7176 }
7177
7178 return 0;
7179}
7180
7181/**
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00007182 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
7183 * @vsi: VSI pointer
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00007184 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00007185 *
7186 * On error: returns error code (negative)
7187 * On success: returns 0
7188 **/
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00007189static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00007190{
7191 int size;
7192 int ret = 0;
7193
Shannon Nelsonac6c5e32013-11-20 10:02:57 +00007194 /* allocate memory for both Tx and Rx ring pointers */
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00007195 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
7196 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
7197 if (!vsi->tx_rings)
7198 return -ENOMEM;
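	/* the Rx ring pointers occupy the second half of the same allocation */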
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00007199 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
7200
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00007201 if (alloc_qvectors) {
7202 /* allocate memory for q_vector pointers */
Julia Lawallf57e4fb2014-07-30 03:11:09 +00007203 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00007204 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
7205 if (!vsi->q_vectors) {
7206 ret = -ENOMEM;
7207 goto err_vectors;
7208 }
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00007209 }
7210 return ret;
7211
7212err_vectors:
7213 kfree(vsi->tx_rings);
7214 return ret;
7215}
7216
7217/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007218 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
7219 * @pf: board private structure
7220 * @type: type of VSI
7221 *
7222 * On error: returns error code (negative)
7223 * On success: returns vsi index in PF (positive)
7224 **/
7225static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
7226{
7227 int ret = -ENODEV;
7228 struct i40e_vsi *vsi;
7229 int vsi_idx;
7230 int i;
7231
7232 /* Need to protect the allocation of the VSIs at the PF level */
7233 mutex_lock(&pf->switch_mutex);
7234
7235 /* VSI list may be fragmented if VSI creation/destruction has
7236 * been happening. We can afford to do a quick scan to look
7237 * for any free VSIs in the list.
7238 *
7239 * find next empty vsi slot, looping back around if necessary
7240 */
7241 i = pf->next_vsi;
Mitch Williams505682c2014-05-20 08:01:37 +00007242 while (i < pf->num_alloc_vsi && pf->vsi[i])
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007243 i++;
Mitch Williams505682c2014-05-20 08:01:37 +00007244 if (i >= pf->num_alloc_vsi) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007245 i = 0;
7246 while (i < pf->next_vsi && pf->vsi[i])
7247 i++;
7248 }
7249
Mitch Williams505682c2014-05-20 08:01:37 +00007250 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007251 vsi_idx = i; /* Found one! */
7252 } else {
7253 ret = -ENODEV;
Alexander Duyck493fb302013-09-28 07:01:44 +00007254 goto unlock_pf; /* out of VSI slots! */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007255 }
7256 pf->next_vsi = ++i;
7257
7258 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
7259 if (!vsi) {
7260 ret = -ENOMEM;
Alexander Duyck493fb302013-09-28 07:01:44 +00007261 goto unlock_pf;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007262 }
7263 vsi->type = type;
7264 vsi->back = pf;
7265 set_bit(__I40E_DOWN, &vsi->state);
7266 vsi->flags = 0;
7267 vsi->idx = vsi_idx;
Jesse Brandeburgac26fc12015-09-28 14:12:37 -04007268 vsi->int_rate_limit = 0;
Anjali Singhai Jain5db4cb52015-02-24 06:58:49 +00007269 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
7270 pf->rss_table_size : 64;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007271 vsi->netdev_registered = false;
7272 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
7273 INIT_LIST_HEAD(&vsi->mac_filter_list);
Shannon Nelson63741842014-04-23 04:50:16 +00007274 vsi->irqs_ready = false;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007275
Alexander Duyck9f65e152013-09-28 06:00:58 +00007276 ret = i40e_set_num_rings_in_vsi(vsi);
7277 if (ret)
7278 goto err_rings;
7279
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00007280 ret = i40e_vsi_alloc_arrays(vsi, true);
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00007281 if (ret)
Alexander Duyck9f65e152013-09-28 06:00:58 +00007282 goto err_rings;
Alexander Duyck493fb302013-09-28 07:01:44 +00007283
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007284 /* Setup default MSIX irq handler for VSI */
7285 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
7286
Kiran Patil21659032015-09-30 14:09:03 -04007287 /* Initialize VSI lock */
7288 spin_lock_init(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007289 pf->vsi[vsi_idx] = vsi;
7290 ret = vsi_idx;
Alexander Duyck493fb302013-09-28 07:01:44 +00007291 goto unlock_pf;
7292
Alexander Duyck9f65e152013-09-28 06:00:58 +00007293err_rings:
Alexander Duyck493fb302013-09-28 07:01:44 +00007294 pf->next_vsi = i - 1;
7295 kfree(vsi);
7296unlock_pf:
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007297 mutex_unlock(&pf->switch_mutex);
7298 return ret;
7299}
7300
7301/**
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00007302 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
7303 * @vsi: VSI pointer
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00007304 * @free_qvectors: a bool to specify if q_vectors need to be freed.
7308 **/
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00007309static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00007310{
7311 /* free the ring and vector containers */
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00007312 if (free_qvectors) {
7313 kfree(vsi->q_vectors);
7314 vsi->q_vectors = NULL;
7315 }
Anjali Singhai Jainf650a382013-11-20 10:02:55 +00007316 kfree(vsi->tx_rings);
7317 vsi->tx_rings = NULL;
7318 vsi->rx_rings = NULL;
7319}
7320
7321/**
Helin Zhang28c58692015-10-26 19:44:27 -04007322 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
7323 * and lookup table
7324 * @vsi: Pointer to VSI structure
7325 */
7326static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
7327{
7328 if (!vsi)
7329 return;
7330
7331 kfree(vsi->rss_hkey_user);
7332 vsi->rss_hkey_user = NULL;
7333
7334 kfree(vsi->rss_lut_user);
7335 vsi->rss_lut_user = NULL;
7336}
7337
7338/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007339 * i40e_vsi_clear - Deallocate the VSI provided
7340 * @vsi: the VSI being un-configured
7341 **/
7342static int i40e_vsi_clear(struct i40e_vsi *vsi)
7343{
7344 struct i40e_pf *pf;
7345
7346 if (!vsi)
7347 return 0;
7348
7349 if (!vsi->back)
7350 goto free_vsi;
7351 pf = vsi->back;
7352
7353 mutex_lock(&pf->switch_mutex);
7354 if (!pf->vsi[vsi->idx]) {
7355 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
7356 vsi->idx, vsi->idx, vsi, vsi->type);
7357 goto unlock_vsi;
7358 }
7359
7360 if (pf->vsi[vsi->idx] != vsi) {
7361 dev_err(&pf->pdev->dev,
7362 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
7363 pf->vsi[vsi->idx]->idx,
7364 pf->vsi[vsi->idx],
7365 pf->vsi[vsi->idx]->type,
7366 vsi->idx, vsi, vsi->type);
7367 goto unlock_vsi;
7368 }
7369
Jeff Kirsherb40c82e62015-02-27 09:18:34 +00007370 /* updates the PF for this cleared vsi */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007371 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7372 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
7373
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00007374 i40e_vsi_free_arrays(vsi, true);
Helin Zhang28c58692015-10-26 19:44:27 -04007375 i40e_clear_rss_config_user(vsi);
Alexander Duyck493fb302013-09-28 07:01:44 +00007376
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007377 pf->vsi[vsi->idx] = NULL;
7378 if (vsi->idx < pf->next_vsi)
7379 pf->next_vsi = vsi->idx;
7380
7381unlock_vsi:
7382 mutex_unlock(&pf->switch_mutex);
7383free_vsi:
7384 kfree(vsi);
7385
7386 return 0;
7387}
7388
7389/**
Alexander Duyck9f65e152013-09-28 06:00:58 +00007390 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7391 * @vsi: the VSI being cleaned
7392 **/
Shannon Nelsonbe1d5ee2013-11-28 06:39:23 +00007393static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
Alexander Duyck9f65e152013-09-28 06:00:58 +00007394{
7395 int i;
7396
Greg Rose8e9dca52013-12-18 13:45:53 +00007397 if (vsi->tx_rings && vsi->tx_rings[0]) {
Neerav Parikhd7397642013-11-28 06:39:37 +00007398 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
Mitch Williams00403f02013-09-28 07:13:13 +00007399 kfree_rcu(vsi->tx_rings[i], rcu);
7400 vsi->tx_rings[i] = NULL;
7401 vsi->rx_rings[i] = NULL;
7402 }
Shannon Nelsonbe1d5ee2013-11-28 06:39:23 +00007403 }
Alexander Duyck9f65e152013-09-28 06:00:58 +00007404}
7405
7406/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007407 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
7408 * @vsi: the VSI being configured
7409 **/
7410static int i40e_alloc_rings(struct i40e_vsi *vsi)
7411{
Akeem G Abodunrine7046ee2014-04-09 05:58:58 +00007412 struct i40e_ring *tx_ring, *rx_ring;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007413 struct i40e_pf *pf = vsi->back;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007414 int i;
7415
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007416 /* Set basic values in the rings to be used later during open() */
Neerav Parikhd7397642013-11-28 06:39:37 +00007417 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
Shannon Nelsonac6c5e32013-11-20 10:02:57 +00007418 /* allocate space for both Tx and Rx in one shot */
Alexander Duyck9f65e152013-09-28 06:00:58 +00007419 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
7420 if (!tx_ring)
7421 goto err_out;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007422
7423 tx_ring->queue_index = i;
7424 tx_ring->reg_idx = vsi->base_queue + i;
7425 tx_ring->ring_active = false;
7426 tx_ring->vsi = vsi;
7427 tx_ring->netdev = vsi->netdev;
7428 tx_ring->dev = &pf->pdev->dev;
7429 tx_ring->count = vsi->num_desc;
7430 tx_ring->size = 0;
7431 tx_ring->dcb_tc = 0;
Anjali Singhai Jain8e0764b2015-06-05 12:20:30 -04007432 if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
7433 tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
Kan Lianga75e8002016-02-19 09:24:04 -05007434 tx_ring->tx_itr_setting = pf->tx_itr_default;
Alexander Duyck9f65e152013-09-28 06:00:58 +00007435 vsi->tx_rings[i] = tx_ring;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007436
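		/* the Rx ring immediately follows the Tx ring in the
		 * two-ring allocation above
		 */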
Alexander Duyck9f65e152013-09-28 06:00:58 +00007437 rx_ring = &tx_ring[1];
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007438 rx_ring->queue_index = i;
7439 rx_ring->reg_idx = vsi->base_queue + i;
7440 rx_ring->ring_active = false;
7441 rx_ring->vsi = vsi;
7442 rx_ring->netdev = vsi->netdev;
7443 rx_ring->dev = &pf->pdev->dev;
7444 rx_ring->count = vsi->num_desc;
7445 rx_ring->size = 0;
7446 rx_ring->dcb_tc = 0;
Kan Lianga75e8002016-02-19 09:24:04 -05007447 rx_ring->rx_itr_setting = pf->rx_itr_default;
Alexander Duyck9f65e152013-09-28 06:00:58 +00007448 vsi->rx_rings[i] = rx_ring;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007449 }
7450
7451 return 0;
Alexander Duyck9f65e152013-09-28 06:00:58 +00007452
7453err_out:
7454 i40e_vsi_clear_rings(vsi);
7455 return -ENOMEM;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007456}
7457
7458/**
7459 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
7460 * @pf: board private structure
7461 * @vectors: the number of MSI-X vectors to request
7462 *
7463 * Returns the number of vectors reserved, or error
7464 **/
7465static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7466{
Alexander Gordeev7b37f372014-02-18 11:11:42 +01007467 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7468 I40E_MIN_MSIX, vectors);
7469 if (vectors < 0) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007470 dev_info(&pf->pdev->dev,
Alexander Gordeev7b37f372014-02-18 11:11:42 +01007471 "MSI-X vector reservation failed: %d\n", vectors);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007472 vectors = 0;
7473 }
7474
7475 return vectors;
7476}
7477
7478/**
7479 * i40e_init_msix - Setup the MSIX capability
7480 * @pf: board private structure
7481 *
7482 * Work with the OS to set up the MSIX vectors needed.
7483 *
Shannon Nelson3b444392015-02-26 16:15:57 +00007484 * Returns the number of vectors reserved or negative on failure
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007485 **/
7486static int i40e_init_msix(struct i40e_pf *pf)
7487{
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007488 struct i40e_hw *hw = &pf->hw;
Shannon Nelson1e200e42015-02-27 09:15:24 +00007489 int vectors_left;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007490 int v_budget, i;
Shannon Nelson3b444392015-02-26 16:15:57 +00007491 int v_actual;
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06007492 int iwarp_requested = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007493
7494 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7495 return -ENODEV;
7496
7497 /* The number of vectors we'll request will be comprised of:
7498 * - Add 1 for "other" cause for Admin Queue events, etc.
7499 * - The number of LAN queue pairs
Anjali Singhai Jainf8ff1462013-11-26 10:49:19 +00007500 * - Queues being used for RSS.
7501 * We don't need as many as max_rss_size vectors;
7502 * use rss_size instead in the calculation since that
7503 * is governed by the number of CPUs in the system.
7504 * - assumes symmetric Tx/Rx pairing
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007505 * - The number of VMDq pairs
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06007506 * - The CPU count within the NUMA node if iWARP is enabled
Vasu Dev38e00432014-08-01 13:27:03 -07007507#ifdef I40E_FCOE
7508 * - The number of FCOE qps.
7509#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007510 * Once we count this up, try the request.
7511 *
7512 * If we can't get what we want, we'll simplify to nearly nothing
7513 * and try again. If that still fails, we punt.
7514 */
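	/* Illustrative budget only (numbers are hypothetical): with 64
	 * vectors in func_caps, 16 online CPUs and sideband flow director
	 * enabled, the running budget is 1 (misc) + 16 (LAN) + 1 (FD
	 * sideband) = 18, leaving 46 vectors for FCoE/iWARP/VMDq before
	 * the reservation request below.
	 */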
Shannon Nelson1e200e42015-02-27 09:15:24 +00007515 vectors_left = hw->func_caps.num_msix_vectors;
7516 v_budget = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007517
Shannon Nelson1e200e42015-02-27 09:15:24 +00007518 /* reserve one vector for miscellaneous handler */
7519 if (vectors_left) {
7520 v_budget++;
7521 vectors_left--;
7522 }
7523
7524 /* reserve vectors for the main PF traffic queues */
7525 pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
7526 vectors_left -= pf->num_lan_msix;
7527 v_budget += pf->num_lan_msix;
7528
7529 /* reserve one vector for sideband flow director */
7530 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7531 if (vectors_left) {
Tushar Davea70e4072016-05-16 12:40:53 -07007532 pf->num_fdsb_msix = 1;
Shannon Nelson1e200e42015-02-27 09:15:24 +00007533 v_budget++;
7534 vectors_left--;
7535 } else {
Tushar Davea70e4072016-05-16 12:40:53 -07007536 pf->num_fdsb_msix = 0;
Shannon Nelson1e200e42015-02-27 09:15:24 +00007537 }
7538 }
John W Linville83840e42015-01-14 03:06:28 +00007539
Vasu Dev38e00432014-08-01 13:27:03 -07007540#ifdef I40E_FCOE
Shannon Nelson1e200e42015-02-27 09:15:24 +00007541 /* can we reserve enough for FCoE? */
Vasu Dev38e00432014-08-01 13:27:03 -07007542 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
Shannon Nelson1e200e42015-02-27 09:15:24 +00007543 if (!vectors_left)
7544 pf->num_fcoe_msix = 0;
7545 else if (vectors_left >= pf->num_fcoe_qps)
7546 pf->num_fcoe_msix = pf->num_fcoe_qps;
7547 else
7548 pf->num_fcoe_msix = 1;
Vasu Dev38e00432014-08-01 13:27:03 -07007549 v_budget += pf->num_fcoe_msix;
Shannon Nelson1e200e42015-02-27 09:15:24 +00007550 vectors_left -= pf->num_fcoe_msix;
Vasu Dev38e00432014-08-01 13:27:03 -07007551 }
Shannon Nelson1e200e42015-02-27 09:15:24 +00007552
Vasu Dev38e00432014-08-01 13:27:03 -07007553#endif
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06007554 /* can we reserve enough for iWARP? */
7555 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
Stefan Assmann4ce20ab2016-09-19 13:37:50 +02007556 iwarp_requested = pf->num_iwarp_msix;
7557
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06007558 if (!vectors_left)
7559 pf->num_iwarp_msix = 0;
7560 else if (vectors_left < pf->num_iwarp_msix)
7561 pf->num_iwarp_msix = 1;
7562 v_budget += pf->num_iwarp_msix;
7563 vectors_left -= pf->num_iwarp_msix;
7564 }
7565
Shannon Nelson1e200e42015-02-27 09:15:24 +00007566 /* any vectors left over go for VMDq support */
7567 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
7568 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
7569 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
7570
Stefan Assmann9ca57e92016-09-19 13:37:49 +02007571 if (!vectors_left) {
7572 pf->num_vmdq_msix = 0;
7573 pf->num_vmdq_qps = 0;
7574 } else {
7575 /* if we're short on vectors for what's desired, we limit
7576 * the queues per vmdq. If this is still more than are
7577 * available, the user will need to change the number of
7578 * queues/vectors used by the PF later with the ethtool
7579 * channels command
7580 */
7581 if (vmdq_vecs < vmdq_vecs_wanted)
7582 pf->num_vmdq_qps = 1;
7583 pf->num_vmdq_msix = pf->num_vmdq_qps;
Shannon Nelson1e200e42015-02-27 09:15:24 +00007584
Stefan Assmann9ca57e92016-09-19 13:37:49 +02007585 v_budget += vmdq_vecs;
7586 vectors_left -= vmdq_vecs;
7587 }
Shannon Nelson1e200e42015-02-27 09:15:24 +00007588 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007589
7590 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
7591 GFP_KERNEL);
7592 if (!pf->msix_entries)
7593 return -ENOMEM;
7594
7595 for (i = 0; i < v_budget; i++)
7596 pf->msix_entries[i].entry = i;
Shannon Nelson3b444392015-02-26 16:15:57 +00007597 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
Anjali Singhai Jaina34977b2014-05-21 23:32:43 +00007598
Shannon Nelson3b444392015-02-26 16:15:57 +00007599 if (v_actual < I40E_MIN_MSIX) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007600 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
7601 kfree(pf->msix_entries);
7602 pf->msix_entries = NULL;
Guilherme G Piccoli4c95aa52016-09-22 10:03:58 -03007603 pci_disable_msix(pf->pdev);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007604 return -ENODEV;
7605
Shannon Nelson3b444392015-02-26 16:15:57 +00007606 } else if (v_actual == I40E_MIN_MSIX) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007607 /* Adjust for minimal MSIX use */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007608 pf->num_vmdq_vsis = 0;
7609 pf->num_vmdq_qps = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007610 pf->num_lan_qps = 1;
7611 pf->num_lan_msix = 1;
7612
Stefan Assmann4ce20ab2016-09-19 13:37:50 +02007613 } else if (!vectors_left) {
7614 /* If we have limited resources, we will start with no vectors
7615 * for the special features and then allocate vectors to some
7616 * of these features based on the policy and at the end disable
7617 * the features that did not get any vectors.
7618 */
Shannon Nelson3b444392015-02-26 16:15:57 +00007619 int vec;
7620
Stefan Assmann4ce20ab2016-09-19 13:37:50 +02007621 dev_info(&pf->pdev->dev,
7622 "MSI-X vector limit reached, attempting to redistribute vectors\n");
Anjali Singhai Jaina34977b2014-05-21 23:32:43 +00007623 /* reserve the misc vector */
Shannon Nelson3b444392015-02-26 16:15:57 +00007624 vec = v_actual - 1;
Anjali Singhai Jaina34977b2014-05-21 23:32:43 +00007625
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007626 /* Scale vector usage down */
7627 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
Anjali Singhai Jaina34977b2014-05-21 23:32:43 +00007628 pf->num_vmdq_vsis = 1;
Shannon Nelson1e200e42015-02-27 09:15:24 +00007629 pf->num_vmdq_qps = 1;
Stefan Assmann4ce20ab2016-09-19 13:37:50 +02007630#ifdef I40E_FCOE
7631 pf->num_fcoe_qps = 0;
7632 pf->num_fcoe_msix = 0;
7633#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007634
7635 /* partition out the remaining vectors */
7636 switch (vec) {
7637 case 2:
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007638 pf->num_lan_msix = 1;
7639 break;
7640 case 3:
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06007641 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7642 pf->num_lan_msix = 1;
7643 pf->num_iwarp_msix = 1;
7644 } else {
7645 pf->num_lan_msix = 2;
7646 }
Vasu Dev38e00432014-08-01 13:27:03 -07007647#ifdef I40E_FCOE
7648 /* give one vector to FCoE */
7649 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7650 pf->num_lan_msix = 1;
7651 pf->num_fcoe_msix = 1;
7652 }
Vasu Dev38e00432014-08-01 13:27:03 -07007653#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007654 break;
7655 default:
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06007656 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7657 pf->num_iwarp_msix = min_t(int, (vec / 3),
7658 iwarp_requested);
7659 pf->num_vmdq_vsis = min_t(int, (vec / 3),
7660 I40E_DEFAULT_NUM_VMDQ_VSI);
7661 } else {
7662 pf->num_vmdq_vsis = min_t(int, (vec / 2),
7663 I40E_DEFAULT_NUM_VMDQ_VSI);
7664 }
Stefan Assmannabd97a92016-09-19 13:37:51 +02007665 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7666 pf->num_fdsb_msix = 1;
7667 vec--;
7668 }
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06007669 pf->num_lan_msix = min_t(int,
7670 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
7671 pf->num_lan_msix);
Stefan Assmann4ce20ab2016-09-19 13:37:50 +02007672 pf->num_lan_qps = pf->num_lan_msix;
Vasu Dev38e00432014-08-01 13:27:03 -07007673#ifdef I40E_FCOE
7674 /* give one vector to FCoE */
7675 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7676 pf->num_fcoe_msix = 1;
7677 vec--;
7678 }
7679#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007680 break;
7681 }
7682 }
7683
Stefan Assmannabd97a92016-09-19 13:37:51 +02007684 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
7685 (pf->num_fdsb_msix == 0)) {
7686 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
7687 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7688 }
Anjali Singhai Jaina34977b2014-05-21 23:32:43 +00007689 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
7690 (pf->num_vmdq_msix == 0)) {
7691 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
7692 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
7693 }
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06007694
7695 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
7696 (pf->num_iwarp_msix == 0)) {
7697 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
7698 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
7699 }
Vasu Dev38e00432014-08-01 13:27:03 -07007700#ifdef I40E_FCOE
7701
7702 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
7703 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
7704 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
7705 }
7706#endif
Stefan Assmann4ce20ab2016-09-19 13:37:50 +02007707 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
7708 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
7709 pf->num_lan_msix,
7710 pf->num_vmdq_msix * pf->num_vmdq_vsis,
7711 pf->num_fdsb_msix,
7712 pf->num_iwarp_msix);
7713
Shannon Nelson3b444392015-02-26 16:15:57 +00007714 return v_actual;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007715}
7716
7717/**
Greg Rose90e04072014-03-06 08:59:57 +00007718 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
Alexander Duyck493fb302013-09-28 07:01:44 +00007719 * @vsi: the VSI being configured
7720 * @v_idx: index of the vector in the vsi struct
Guilherme G. Piccoli7f6c5532016-06-27 12:16:43 -03007721 * @cpu: CPU to be used in the affinity mask
Alexander Duyck493fb302013-09-28 07:01:44 +00007722 *
7723 * We allocate one q_vector. If allocation fails we return -ENOMEM.
7724 **/
Guilherme G. Piccoli7f6c5532016-06-27 12:16:43 -03007725static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
Alexander Duyck493fb302013-09-28 07:01:44 +00007726{
7727 struct i40e_q_vector *q_vector;
7728
7729 /* allocate q_vector */
7730 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7731 if (!q_vector)
7732 return -ENOMEM;
7733
7734 q_vector->vsi = vsi;
7735 q_vector->v_idx = v_idx;
Guilherme G. Piccoli7f6c5532016-06-27 12:16:43 -03007736 cpumask_set_cpu(cpu, &q_vector->affinity_mask);
7737
Alexander Duyck493fb302013-09-28 07:01:44 +00007738 if (vsi->netdev)
7739 netif_napi_add(vsi->netdev, &q_vector->napi,
Jesse Brandeburgeefeace2014-05-10 04:49:13 +00007740 i40e_napi_poll, NAPI_POLL_WEIGHT);
Alexander Duyck493fb302013-09-28 07:01:44 +00007741
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00007742 q_vector->rx.latency_range = I40E_LOW_LATENCY;
7743 q_vector->tx.latency_range = I40E_LOW_LATENCY;
7744
Alexander Duyck493fb302013-09-28 07:01:44 +00007745 /* tie q_vector and vsi together */
7746 vsi->q_vectors[v_idx] = q_vector;
7747
7748 return 0;
7749}
7750
7751/**
Greg Rose90e04072014-03-06 08:59:57 +00007752 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007753 * @vsi: the VSI being configured
7754 *
7755 * We allocate one q_vector per queue interrupt. If allocation fails we
7756 * return -ENOMEM.
7757 **/
Greg Rose90e04072014-03-06 08:59:57 +00007758static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007759{
7760 struct i40e_pf *pf = vsi->back;
Guilherme G. Piccoli7f6c5532016-06-27 12:16:43 -03007761 int err, v_idx, num_q_vectors, current_cpu;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007762
7763 /* if not MSIX, give the one vector only to the LAN VSI */
7764 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7765 num_q_vectors = vsi->num_q_vectors;
7766 else if (vsi == pf->vsi[pf->lan_vsi])
7767 num_q_vectors = 1;
7768 else
7769 return -EINVAL;
7770
Guilherme G. Piccoli7f6c5532016-06-27 12:16:43 -03007771 current_cpu = cpumask_first(cpu_online_mask);
7772
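	/* spread q_vectors across the online CPUs in round-robin order,
	 * wrapping back to the first CPU once the mask is exhausted
	 */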
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007773 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
Guilherme G. Piccoli7f6c5532016-06-27 12:16:43 -03007774 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
Alexander Duyck493fb302013-09-28 07:01:44 +00007775 if (err)
7776 goto err_out;
Guilherme G. Piccoli7f6c5532016-06-27 12:16:43 -03007777 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
7778 if (unlikely(current_cpu >= nr_cpu_ids))
7779 current_cpu = cpumask_first(cpu_online_mask);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007780 }
7781
7782 return 0;
Alexander Duyck493fb302013-09-28 07:01:44 +00007783
7784err_out:
7785 while (v_idx--)
7786 i40e_free_q_vector(vsi, v_idx);
7787
7788 return err;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007789}
7790
7791/**
7792 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
7793 * @pf: board private structure to initialize
7794 **/
Jesse Brandeburgc11472802015-04-07 19:45:39 -04007795static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007796{
Shannon Nelson3b444392015-02-26 16:15:57 +00007797 int vectors = 0;
7798 ssize_t size;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007799
7800 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
Shannon Nelson3b444392015-02-26 16:15:57 +00007801 vectors = i40e_init_msix(pf);
7802 if (vectors < 0) {
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -08007803 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06007804 I40E_FLAG_IWARP_ENABLED |
Vasu Dev38e00432014-08-01 13:27:03 -07007805#ifdef I40E_FCOE
7806 I40E_FLAG_FCOE_ENABLED |
7807#endif
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -08007808 I40E_FLAG_RSS_ENABLED |
Neerav Parikh4d9b6042014-05-22 06:31:51 +00007809 I40E_FLAG_DCB_CAPABLE |
Dave Ertmana0362442016-08-29 17:38:26 -07007810 I40E_FLAG_DCB_ENABLED |
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -08007811 I40E_FLAG_SRIOV_ENABLED |
7812 I40E_FLAG_FD_SB_ENABLED |
7813 I40E_FLAG_FD_ATR_ENABLED |
7814 I40E_FLAG_VMDQ_ENABLED);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007815
7816 /* rework the queue expectations without MSIX */
7817 i40e_determine_queue_usage(pf);
7818 }
7819 }
7820
7821 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
7822 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
Catherine Sullivan77fa28b2014-02-20 19:29:17 -08007823 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
Shannon Nelson3b444392015-02-26 16:15:57 +00007824 vectors = pci_enable_msi(pf->pdev);
7825 if (vectors < 0) {
7826 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
7827 vectors);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007828 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
7829 }
Shannon Nelson3b444392015-02-26 16:15:57 +00007830 vectors = 1; /* one MSI or Legacy vector */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007831 }
7832
Shannon Nelson958a3e32013-09-28 07:13:28 +00007833 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
Catherine Sullivan77fa28b2014-02-20 19:29:17 -08007834 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
Shannon Nelson958a3e32013-09-28 07:13:28 +00007835
Shannon Nelson3b444392015-02-26 16:15:57 +00007836 /* set up vector assignment tracking */
7837 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
7838 pf->irq_pile = kzalloc(size, GFP_KERNEL);
Jesse Brandeburgc11472802015-04-07 19:45:39 -04007839 if (!pf->irq_pile) {
7840 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
7841 return -ENOMEM;
7842 }
Shannon Nelson3b444392015-02-26 16:15:57 +00007843 pf->irq_pile->num_entries = vectors;
7844 pf->irq_pile->search_hint = 0;
7845
Jesse Brandeburgc11472802015-04-07 19:45:39 -04007846 /* track first vector for misc interrupts, ignore return */
Shannon Nelson3b444392015-02-26 16:15:57 +00007847 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
Jesse Brandeburgc11472802015-04-07 19:45:39 -04007848
7849 return 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007850}
7851
7852/**
7853 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
7854 * @pf: board private structure
7855 *
7856 * This sets up the handler for MSIX 0, which is used to manage the
7857 * non-queue interrupts, e.g. AdminQ and errors. This is not used
7858 * when in MSI or Legacy interrupt mode.
7859 **/
7860static int i40e_setup_misc_vector(struct i40e_pf *pf)
7861{
7862 struct i40e_hw *hw = &pf->hw;
7863 int err = 0;
7864
7865 /* Only request the irq if this is the first time through, and
7866 * not when we're rebuilding after a Reset
7867 */
7868 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7869 err = request_irq(pf->msix_entries[0].vector,
Carolyn Wybornyb294ac72014-12-11 07:06:39 +00007870 i40e_intr, 0, pf->int_name, pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007871 if (err) {
7872 dev_info(&pf->pdev->dev,
Catherine Sullivan77fa28b2014-02-20 19:29:17 -08007873 "request_irq for %s failed: %d\n",
Carolyn Wybornyb294ac72014-12-11 07:06:39 +00007874 pf->int_name, err);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007875 return -EFAULT;
7876 }
7877 }
7878
Jacob Kellerab437b52014-12-14 01:55:08 +00007879 i40e_enable_misc_int_causes(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007880
7881 /* associate no queues to the misc vector */
7882 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
7883 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
7884
7885 i40e_flush(hw);
7886
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08007887 i40e_irq_dynamic_enable_icr0(pf, true);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00007888
7889 return err;
7890}
7891
7892/**
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04007893 * i40e_config_rss_aq - Prepare for RSS using AQ commands
7894 * @vsi: vsi structure
7895 * @seed: RSS hash seed
7896 **/
Helin Zhange69ff812015-10-21 19:56:22 -04007897static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
7898 u8 *lut, u16 lut_size)
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04007899{
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04007900 struct i40e_pf *pf = vsi->back;
7901 struct i40e_hw *hw = &pf->hw;
Jacob Keller776b2e12016-07-19 16:23:30 -07007902 int ret = 0;
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04007903
Jacob Keller776b2e12016-07-19 16:23:30 -07007904 if (seed) {
7905 struct i40e_aqc_get_set_rss_key_data *seed_dw =
7906 (struct i40e_aqc_get_set_rss_key_data *)seed;
7907 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
7908 if (ret) {
7909 dev_info(&pf->pdev->dev,
7910 "Cannot set RSS key, err %s aq_err %s\n",
7911 i40e_stat_str(hw, ret),
7912 i40e_aq_str(hw, hw->aq.asq_last_status));
7913 return ret;
7914 }
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04007915 }
Jacob Keller776b2e12016-07-19 16:23:30 -07007916 if (lut) {
7917 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04007918
Jacob Keller776b2e12016-07-19 16:23:30 -07007919 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
7920 if (ret) {
7921 dev_info(&pf->pdev->dev,
7922 "Cannot set RSS lut, err %s aq_err %s\n",
7923 i40e_stat_str(hw, ret),
7924 i40e_aq_str(hw, hw->aq.asq_last_status));
7925 return ret;
7926 }
7927 }
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04007928 return ret;
7929}
7930
7931/**
Anjali Singhai Jain95a73782015-12-22 14:25:04 -08007932 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
7933 * @vsi: Pointer to vsi structure
7934 * @seed: Buffter to store the hash keys
7935 * @lut: Buffer to store the lookup table entries
7936 * @lut_size: Size of buffer to store the lookup table entries
7937 *
7938 * Return 0 on success, negative on failure
7939 */
static int i40e_get_rss_aq(struct i40e_vsi *vsi, u8 *seed,
			   u8 *lut, u16 lut_size)
7942{
7943 struct i40e_pf *pf = vsi->back;
7944 struct i40e_hw *hw = &pf->hw;
7945 int ret = 0;
7946
7947 if (seed) {
7948 ret = i40e_aq_get_rss_key(hw, vsi->id,
7949 (struct i40e_aqc_get_set_rss_key_data *)seed);
7950 if (ret) {
7951 dev_info(&pf->pdev->dev,
7952 "Cannot get RSS key, err %s aq_err %s\n",
7953 i40e_stat_str(&pf->hw, ret),
7954 i40e_aq_str(&pf->hw,
7955 pf->hw.aq.asq_last_status));
7956 return ret;
7957 }
7958 }
7959
7960 if (lut) {
		bool pf_lut = (vsi->type == I40E_VSI_MAIN);
7962
7963 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
7964 if (ret) {
7965 dev_info(&pf->pdev->dev,
7966 "Cannot get RSS lut, err %s aq_err %s\n",
7967 i40e_stat_str(&pf->hw, ret),
7968 i40e_aq_str(&pf->hw,
7969 pf->hw.aq.asq_last_status));
7970 return ret;
7971 }
7972 }
7973
7974 return ret;
7975}
7976
7977/**
Jacob Keller0582b962016-07-19 16:23:29 -07007978 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
7979 * @vsi: VSI structure
7980 **/
7981static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
7982{
7983 u8 seed[I40E_HKEY_ARRAY_SIZE];
7984 struct i40e_pf *pf = vsi->back;
7985 u8 *lut;
7986 int ret;
7987
7988 if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
7989 return 0;
7990
Jacob Keller552b9962016-07-19 16:23:31 -07007991 if (!vsi->rss_size)
7992 vsi->rss_size = min_t(int, pf->alloc_rss_size,
7993 vsi->num_queue_pairs);
7994 if (!vsi->rss_size)
7995 return -EINVAL;
7996
Jacob Keller0582b962016-07-19 16:23:29 -07007997 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
7998 if (!lut)
7999 return -ENOMEM;
Jacob Keller552b9962016-07-19 16:23:31 -07008000 /* Use the user configured hash keys and lookup table if there is one,
8001 * otherwise use default
8002 */
8003 if (vsi->rss_lut_user)
8004 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8005 else
8006 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8007 if (vsi->rss_hkey_user)
8008 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8009 else
8010 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
Jacob Keller0582b962016-07-19 16:23:29 -07008011 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
8012 kfree(lut);
8013
8014 return ret;
8015}
8016
8017/**
Helin Zhang043dd652015-10-21 19:56:23 -04008018 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
Helin Zhange69ff812015-10-21 19:56:22 -04008019 * @vsi: Pointer to vsi structure
8020 * @seed: RSS hash seed
8021 * @lut: Lookup table
8022 * @lut_size: Lookup table size
8023 *
8024 * Returns 0 on success, negative on failure
8025 **/
8026static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
8027 const u8 *lut, u16 lut_size)
8028{
8029 struct i40e_pf *pf = vsi->back;
8030 struct i40e_hw *hw = &pf->hw;
Mitch Williamsc4e18682016-04-12 08:30:40 -07008031 u16 vf_id = vsi->vf_id;
Helin Zhange69ff812015-10-21 19:56:22 -04008032 u8 i;
8033
8034 /* Fill out hash function seed */
8035 if (seed) {
8036 u32 *seed_dw = (u32 *)seed;
8037
Mitch Williamsc4e18682016-04-12 08:30:40 -07008038 if (vsi->type == I40E_VSI_MAIN) {
8039 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8040 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
8041 seed_dw[i]);
8042 } else if (vsi->type == I40E_VSI_SRIOV) {
8043 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
8044 i40e_write_rx_ctl(hw,
8045 I40E_VFQF_HKEY1(i, vf_id),
8046 seed_dw[i]);
8047 } else {
8048 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
8049 }
Helin Zhange69ff812015-10-21 19:56:22 -04008050 }
8051
8052 if (lut) {
8053 u32 *lut_dw = (u32 *)lut;
8054
Mitch Williamsc4e18682016-04-12 08:30:40 -07008055 if (vsi->type == I40E_VSI_MAIN) {
8056 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8057 return -EINVAL;
8058 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8059 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
8060 } else if (vsi->type == I40E_VSI_SRIOV) {
8061 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
8062 return -EINVAL;
8063 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
8064 i40e_write_rx_ctl(hw,
8065 I40E_VFQF_HLUT1(i, vf_id),
8066 lut_dw[i]);
8067 } else {
8068 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
8069 }
Helin Zhange69ff812015-10-21 19:56:22 -04008070 }
8071 i40e_flush(hw);
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04008072
8073 return 0;
8074}
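
/* Sizing note for the register writes above (the index values quoted here
 * are the customary i40e definitions, stated as an assumption rather than
 * taken from this file): the PF hash key spans I40E_PFQF_HKEY_MAX_INDEX + 1
 * = 13 32-bit registers, i.e. 13 * 4 = 52 bytes = I40E_HKEY_ARRAY_SIZE, and
 * the PF LUT spans I40E_PFQF_HLUT_MAX_INDEX + 1 = 128 registers, i.e.
 * 512 one-byte entries = I40E_HLUT_ARRAY_SIZE. Callers must size their
 * seed/lut buffers accordingly, which is why the lut_size checks above
 * reject anything else.
 */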
8075
8076/**
Helin Zhang043dd652015-10-21 19:56:23 -04008077 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
8078 * @vsi: Pointer to VSI structure
8079 * @seed: Buffer to store the keys
8080 * @lut: Buffer to store the lookup table entries
8081 * @lut_size: Size of buffer to store the lookup table entries
8082 *
8083 * Returns 0 on success, negative on failure
8084 */
8085static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
8086 u8 *lut, u16 lut_size)
8087{
8088 struct i40e_pf *pf = vsi->back;
8089 struct i40e_hw *hw = &pf->hw;
8090 u16 i;
8091
8092 if (seed) {
8093 u32 *seed_dw = (u32 *)seed;
8094
8095 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
Shannon Nelson272cdaf22016-02-17 16:12:21 -08008096 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
Helin Zhang043dd652015-10-21 19:56:23 -04008097 }
8098 if (lut) {
8099 u32 *lut_dw = (u32 *)lut;
8100
8101 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8102 return -EINVAL;
8103 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8104 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
8105 }
8106
8107 return 0;
8108}
8109
8110/**
8111 * i40e_config_rss - Configure RSS keys and lut
8112 * @vsi: Pointer to VSI structure
8113 * @seed: RSS hash seed
8114 * @lut: Lookup table
8115 * @lut_size: Lookup table size
8116 *
8117 * Returns 0 on success, negative on failure
8118 */
8119int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8120{
8121 struct i40e_pf *pf = vsi->back;
8122
8123 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8124 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
8125 else
8126 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
8127}
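
/* Illustrative sketch, not part of the driver: how a caller (for example an
 * ethtool .set_rxfh style path) could hand a user-supplied key and LUT to
 * i40e_config_rss(). The i40e_example_* name, the parameters and the
 * __maybe_unused marker are ours for the example only.
 */
static int __maybe_unused i40e_example_set_rxfh(struct i40e_pf *pf,
						const u8 *user_key,
						const u8 *user_lut)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* copy the caller's buffers and push them through the common entry
	 * point, which picks the AQ or register method automatically
	 */
	memcpy(seed, user_key, I40E_HKEY_ARRAY_SIZE);
	memcpy(lut, user_lut, vsi->rss_table_size);
	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);

	kfree(lut);
	return ret;
}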
8128
8129/**
8130 * i40e_get_rss - Get RSS keys and lut
8131 * @vsi: Pointer to VSI structure
8132 * @seed: Buffer to store the keys
8133 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
8135 *
8136 * Returns 0 on success, negative on failure
8137 */
8138int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8139{
Anjali Singhai Jain95a73782015-12-22 14:25:04 -08008140 struct i40e_pf *pf = vsi->back;
8141
8142 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8143 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
8144 else
8145 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
Helin Zhang043dd652015-10-21 19:56:23 -04008146}
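
/* Illustrative sketch, not part of the driver: reading the active key and
 * LUT back through i40e_get_rss(), e.g. for an ethtool .get_rxfh style
 * path. The i40e_example_* name and the __maybe_unused marker are ours.
 */
static void __maybe_unused i40e_example_dump_rss(struct i40e_vsi *vsi)
{
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return;

	if (!i40e_get_rss(vsi, seed, lut, vsi->rss_table_size))
		dev_dbg(&vsi->back->pdev->dev,
			"RSS lut[0]=%u lut[1]=%u key[0]=0x%02x\n",
			lut[0], lut[1], seed[0]);

	kfree(lut);
}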
8147
8148/**
Helin Zhange69ff812015-10-21 19:56:22 -04008149 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
8150 * @pf: Pointer to board private structure
8151 * @lut: Lookup table
8152 * @rss_table_size: Lookup table size
8153 * @rss_size: Range of queue number for hashing
8154 */
Alan Bradyf1582352016-08-24 11:33:46 -07008155void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
8156 u16 rss_table_size, u16 rss_size)
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04008157{
Helin Zhange69ff812015-10-21 19:56:22 -04008158 u16 i;
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04008159
Helin Zhange69ff812015-10-21 19:56:22 -04008160 for (i = 0; i < rss_table_size; i++)
8161 lut[i] = i % rss_size;
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04008162}
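
/* Worked example of the distribution above: with rss_table_size = 8 and
 * rss_size = 3 the LUT becomes {0, 1, 2, 0, 1, 2, 0, 1}, i.e. queues
 * 0..rss_size-1 are reused round-robin across the whole table.
 */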
8163
8164/**
Helin Zhang043dd652015-10-21 19:56:23 -04008165 * i40e_pf_config_rss - Prepare for RSS if used
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008166 * @pf: board private structure
8167 **/
Helin Zhang043dd652015-10-21 19:56:23 -04008168static int i40e_pf_config_rss(struct i40e_pf *pf)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008169{
Anjali Singhai Jain66ddcff2015-02-24 06:58:50 +00008170 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04008171 u8 seed[I40E_HKEY_ARRAY_SIZE];
Helin Zhange69ff812015-10-21 19:56:22 -04008172 u8 *lut;
Anjali Singhai Jain4617e8c2013-11-20 10:02:56 +00008173 struct i40e_hw *hw = &pf->hw;
Carolyn Wybornye157ea32014-06-03 23:50:22 +00008174 u32 reg_val;
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04008175 u64 hena;
Helin Zhange69ff812015-10-21 19:56:22 -04008176 int ret;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008177
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008178 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
Shannon Nelson272cdaf22016-02-17 16:12:21 -08008179 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
8180 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04008181 hena |= i40e_pf_get_default_rss_hena(pf);
8182
Shannon Nelson272cdaf22016-02-17 16:12:21 -08008183 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
8184 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008185
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04008186 /* Determine the RSS table size based on the hardware capabilities */
Shannon Nelson272cdaf22016-02-17 16:12:21 -08008187 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04008188 reg_val = (pf->rss_table_size == 512) ?
8189 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
8190 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
Shannon Nelson272cdaf22016-02-17 16:12:21 -08008191 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
Carolyn Wybornye157ea32014-06-03 23:50:22 +00008192
Helin Zhang28c58692015-10-26 19:44:27 -04008193 /* Determine the RSS size of the VSI */
8194 if (!vsi->rss_size)
Helin Zhangacd65442015-10-26 19:44:28 -04008195 vsi->rss_size = min_t(int, pf->alloc_rss_size,
8196 vsi->num_queue_pairs);
Mitch Williamsa4fa59c2016-09-12 14:18:43 -07008197 if (!vsi->rss_size)
8198 return -EINVAL;
Helin Zhang28c58692015-10-26 19:44:27 -04008199
Helin Zhange69ff812015-10-21 19:56:22 -04008200 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8201 if (!lut)
8202 return -ENOMEM;
8203
Helin Zhang28c58692015-10-26 19:44:27 -04008204 /* Use user configured lut if there is one, otherwise use default */
8205 if (vsi->rss_lut_user)
8206 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8207 else
8208 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
Helin Zhange69ff812015-10-21 19:56:22 -04008209
Helin Zhang28c58692015-10-26 19:44:27 -04008210 /* Use user configured hash key if there is one, otherwise
8211 * use default.
8212 */
8213 if (vsi->rss_hkey_user)
8214 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8215 else
8216 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
Helin Zhang043dd652015-10-21 19:56:23 -04008217 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
Helin Zhange69ff812015-10-21 19:56:22 -04008218 kfree(lut);
8219
8220 return ret;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008221}
8222
8223/**
Anjali Singhai Jainf8ff1462013-11-26 10:49:19 +00008224 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
8225 * @pf: board private structure
8226 * @queue_count: the requested queue count for rss.
8227 *
 * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
 * count, which may differ from the requested queue count.
8230 **/
8231int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
8232{
Anjali Singhai Jain9a3bd2f2015-02-24 06:58:44 +00008233 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8234 int new_rss_size;
8235
Anjali Singhai Jainf8ff1462013-11-26 10:49:19 +00008236 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
8237 return 0;
8238
Anjali Singhai Jain9a3bd2f2015-02-24 06:58:44 +00008239 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
Anjali Singhai Jainf8ff1462013-11-26 10:49:19 +00008240
Anjali Singhai Jain9a3bd2f2015-02-24 06:58:44 +00008241 if (queue_count != vsi->num_queue_pairs) {
8242 vsi->req_queue_pairs = queue_count;
Anjali Singhai Jainf8ff1462013-11-26 10:49:19 +00008243 i40e_prep_for_reset(pf);
8244
Helin Zhangacd65442015-10-26 19:44:28 -04008245 pf->alloc_rss_size = new_rss_size;
Anjali Singhai Jainf8ff1462013-11-26 10:49:19 +00008246
8247 i40e_reset_and_rebuild(pf, true);
Helin Zhang28c58692015-10-26 19:44:27 -04008248
8249 /* Discard the user configured hash keys and lut, if less
8250 * queues are enabled.
8251 */
8252 if (queue_count < vsi->rss_size) {
8253 i40e_clear_rss_config_user(vsi);
8254 dev_dbg(&pf->pdev->dev,
8255 "discard user configured hash keys and lut\n");
8256 }
8257
8258 /* Reset vsi->rss_size, as number of enabled queues changed */
Helin Zhangacd65442015-10-26 19:44:28 -04008259 vsi->rss_size = min_t(int, pf->alloc_rss_size,
8260 vsi->num_queue_pairs);
Helin Zhang28c58692015-10-26 19:44:27 -04008261
Helin Zhang043dd652015-10-21 19:56:23 -04008262 i40e_pf_config_rss(pf);
Anjali Singhai Jainf8ff1462013-11-26 10:49:19 +00008263 }
Lihong Yang12815052016-09-27 11:28:48 -07008264 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
8265 vsi->req_queue_pairs, pf->rss_size_max);
Helin Zhangacd65442015-10-26 19:44:28 -04008266 return pf->alloc_rss_size;
Anjali Singhai Jainf8ff1462013-11-26 10:49:19 +00008267}
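
/* Illustrative sketch, not part of the driver: how an ethtool-style
 * "set channels" request could be funneled through
 * i40e_reconfig_rss_queues(). The requested count is only a hint; the
 * value returned is what was actually programmed. The i40e_example_*
 * name and the __maybe_unused marker are ours for the example only.
 */
static int __maybe_unused i40e_example_set_queue_count(struct i40e_pf *pf,
						       int requested)
{
	int granted;

	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return -EOPNOTSUPP;

	granted = i40e_reconfig_rss_queues(pf, requested);
	dev_dbg(&pf->pdev->dev, "requested %d RSS queues, granted %d\n",
		requested, granted);

	return granted > 0 ? 0 : -EINVAL;
}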
8268
8269/**
Greg Rosef4492db2015-02-06 08:52:12 +00008270 * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
8271 * @pf: board private structure
8272 **/
8273i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
8274{
8275 i40e_status status;
8276 bool min_valid, max_valid;
8277 u32 max_bw, min_bw;
8278
8279 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
8280 &min_valid, &max_valid);
8281
8282 if (!status) {
8283 if (min_valid)
8284 pf->npar_min_bw = min_bw;
8285 if (max_valid)
8286 pf->npar_max_bw = max_bw;
8287 }
8288
8289 return status;
8290}
8291
8292/**
8293 * i40e_set_npar_bw_setting - Set BW settings for this PF partition
8294 * @pf: board private structure
8295 **/
8296i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
8297{
8298 struct i40e_aqc_configure_partition_bw_data bw_data;
8299 i40e_status status;
8300
Jeff Kirsherb40c82e62015-02-27 09:18:34 +00008301 /* Set the valid bit for this PF */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04008302 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
Greg Rosef4492db2015-02-06 08:52:12 +00008303 bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
8304 bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
8305
8306 /* Set the new bandwidths */
8307 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
8308
8309 return status;
8310}
8311
8312/**
8313 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
8314 * @pf: board private structure
8315 **/
8316i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
8317{
8318 /* Commit temporary BW setting to permanent NVM image */
8319 enum i40e_admin_queue_err last_aq_status;
8320 i40e_status ret;
8321 u16 nvm_word;
8322
8323 if (pf->hw.partition_id != 1) {
8324 dev_info(&pf->pdev->dev,
8325 "Commit BW only works on partition 1! This is partition %d",
8326 pf->hw.partition_id);
8327 ret = I40E_NOT_SUPPORTED;
8328 goto bw_commit_out;
8329 }
8330
8331 /* Acquire NVM for read access */
8332 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
8333 last_aq_status = pf->hw.aq.asq_last_status;
8334 if (ret) {
8335 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04008336 "Cannot acquire NVM for read access, err %s aq_err %s\n",
8337 i40e_stat_str(&pf->hw, ret),
8338 i40e_aq_str(&pf->hw, last_aq_status));
Greg Rosef4492db2015-02-06 08:52:12 +00008339 goto bw_commit_out;
8340 }
8341
8342 /* Read word 0x10 of NVM - SW compatibility word 1 */
8343 ret = i40e_aq_read_nvm(&pf->hw,
8344 I40E_SR_NVM_CONTROL_WORD,
8345 0x10, sizeof(nvm_word), &nvm_word,
8346 false, NULL);
8347 /* Save off last admin queue command status before releasing
8348 * the NVM
8349 */
8350 last_aq_status = pf->hw.aq.asq_last_status;
8351 i40e_release_nvm(&pf->hw);
8352 if (ret) {
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04008353 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
8354 i40e_stat_str(&pf->hw, ret),
8355 i40e_aq_str(&pf->hw, last_aq_status));
Greg Rosef4492db2015-02-06 08:52:12 +00008356 goto bw_commit_out;
8357 }
8358
8359 /* Wait a bit for NVM release to complete */
8360 msleep(50);
8361
8362 /* Acquire NVM for write access */
8363 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
8364 last_aq_status = pf->hw.aq.asq_last_status;
8365 if (ret) {
8366 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04008367 "Cannot acquire NVM for write access, err %s aq_err %s\n",
8368 i40e_stat_str(&pf->hw, ret),
8369 i40e_aq_str(&pf->hw, last_aq_status));
Greg Rosef4492db2015-02-06 08:52:12 +00008370 goto bw_commit_out;
8371 }
8372 /* Write it back out unchanged to initiate update NVM,
8373 * which will force a write of the shadow (alt) RAM to
8374 * the NVM - thus storing the bandwidth values permanently.
8375 */
8376 ret = i40e_aq_update_nvm(&pf->hw,
8377 I40E_SR_NVM_CONTROL_WORD,
8378 0x10, sizeof(nvm_word),
8379 &nvm_word, true, NULL);
8380 /* Save off last admin queue command status before releasing
8381 * the NVM
8382 */
8383 last_aq_status = pf->hw.aq.asq_last_status;
8384 i40e_release_nvm(&pf->hw);
8385 if (ret)
8386 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04008387 "BW settings NOT SAVED, err %s aq_err %s\n",
8388 i40e_stat_str(&pf->hw, ret),
8389 i40e_aq_str(&pf->hw, last_aq_status));
Greg Rosef4492db2015-02-06 08:52:12 +00008390bw_commit_out:
8391
8392 return ret;
8393}
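
/* Illustrative sketch, not part of the driver: the intended ordering of
 * the three NPAR bandwidth helpers above - read the current alt-RAM
 * values, adjust the pf copies, write them back, then commit to NVM so
 * they survive a power cycle. The i40e_example_* name and the
 * __maybe_unused marker are ours for the example only.
 */
static int __maybe_unused i40e_example_npar_bw_update(struct i40e_pf *pf,
						      u32 new_min, u32 new_max)
{
	if (i40e_get_npar_bw_setting(pf))
		return -EIO;

	pf->npar_min_bw = new_min;
	pf->npar_max_bw = new_max;

	if (i40e_set_npar_bw_setting(pf))
		return -EIO;

	return i40e_commit_npar_bw_setting(pf) ? -EIO : 0;
}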
8394
8395/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008396 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
8397 * @pf: board private structure to initialize
8398 *
8399 * i40e_sw_init initializes the Adapter private data structure.
8400 * Fields are initialized based on PCI device information and
8401 * OS network device settings (MTU size).
8402 **/
8403static int i40e_sw_init(struct i40e_pf *pf)
8404{
8405 int err = 0;
8406 int size;
8407
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008408 /* Set default capability flags */
8409 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
8410 I40E_FLAG_MSI_ENABLED |
Mitch Williams2bc7ee82015-02-06 08:52:11 +00008411 I40E_FLAG_MSIX_ENABLED;
8412
Mitch Williamsca99eb92014-04-04 04:43:07 +00008413 /* Set default ITR */
8414 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
8415 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
8416
Jesse Brandeburg7134f9c2013-11-26 08:56:05 +00008417 /* Depending on PF configurations, it is possible that the RSS
8418 * maximum might end up larger than the available queues
8419 */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04008420 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
Helin Zhangacd65442015-10-26 19:44:28 -04008421 pf->alloc_rss_size = 1;
Anjali Singhai Jain5db4cb52015-02-24 06:58:49 +00008422 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
Jesse Brandeburg7134f9c2013-11-26 08:56:05 +00008423 pf->rss_size_max = min_t(int, pf->rss_size_max,
8424 pf->hw.func_caps.num_tx_qp);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008425 if (pf->hw.func_caps.rss) {
8426 pf->flags |= I40E_FLAG_RSS_ENABLED;
Helin Zhangacd65442015-10-26 19:44:28 -04008427 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
8428 num_online_cpus());
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008429 }
8430
Catherine Sullivan2050bc62013-12-18 13:46:03 +00008431 /* MFP mode enabled */
Pawel Orlowskic78b9532015-04-22 19:34:06 -04008432 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
Catherine Sullivan2050bc62013-12-18 13:46:03 +00008433 pf->flags |= I40E_FLAG_MFP_ENABLED;
8434 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
Greg Rosef4492db2015-02-06 08:52:12 +00008435 if (i40e_get_npar_bw_setting(pf))
8436 dev_warn(&pf->pdev->dev,
8437 "Could not get NPAR bw settings\n");
8438 else
8439 dev_info(&pf->pdev->dev,
8440 "Min BW = %8.8x, Max BW = %8.8x\n",
8441 pf->npar_min_bw, pf->npar_max_bw);
Catherine Sullivan2050bc62013-12-18 13:46:03 +00008442 }
8443
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08008444 /* FW/NVM is not yet fixed in this regard */
8445 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
8446 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
8447 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
8448 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
Shannon Nelson6eae9c62015-09-03 17:18:55 -04008449 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
8450 pf->hw.num_partitions > 1)
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08008451 dev_info(&pf->pdev->dev,
Anjali Singhai Jain0b675842014-03-06 08:59:51 +00008452 "Flow Director Sideband mode Disabled in MFP mode\n");
Shannon Nelson6eae9c62015-09-03 17:18:55 -04008453 else
8454 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08008455 pf->fdir_pf_filter_count =
8456 pf->hw.func_caps.fd_filters_guaranteed;
8457 pf->hw.fdir_shared_filter_count =
8458 pf->hw.func_caps.fd_filters_best_effort;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008459 }
8460
Neerav Parikhf1bbad32016-01-13 16:51:39 -08008461 if (i40e_is_mac_710(&pf->hw) &&
Anjali Singhai Jain8eed76f2015-12-09 15:50:31 -08008462 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
Neerav Parikhf1bbad32016-01-13 16:51:39 -08008463 (pf->hw.aq.fw_maj_ver < 4))) {
Anjali Singhai Jain8eed76f2015-12-09 15:50:31 -08008464 pf->flags |= I40E_FLAG_RESTART_AUTONEG;
Neerav Parikhf1bbad32016-01-13 16:51:39 -08008465 /* No DCB support for FW < v4.33 */
8466 pf->flags |= I40E_FLAG_NO_DCB_SUPPORT;
8467 }
8468
8469 /* Disable FW LLDP if FW < v4.3 */
8470 if (i40e_is_mac_710(&pf->hw) &&
8471 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
8472 (pf->hw.aq.fw_maj_ver < 4)))
8473 pf->flags |= I40E_FLAG_STOP_FW_LLDP;
8474
8475 /* Use the FW Set LLDP MIB API if FW > v4.40 */
8476 if (i40e_is_mac_710(&pf->hw) &&
8477 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
8478 (pf->hw.aq.fw_maj_ver >= 5)))
8479 pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;
Anjali Singhai Jain8eed76f2015-12-09 15:50:31 -08008480
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008481 if (pf->hw.func_caps.vmdq) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008482 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04008483 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
Jesse Brandeburge9e53662015-10-02 17:57:21 -07008484 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008485 }
8486
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06008487 if (pf->hw.func_caps.iwarp) {
8488 pf->flags |= I40E_FLAG_IWARP_ENABLED;
		/* IWARP needs one extra vector for CQP, just like MISC. */
8490 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
8491 }
8492
Vasu Dev38e00432014-08-01 13:27:03 -07008493#ifdef I40E_FCOE
Shannon Nelson21364bc2015-08-26 15:14:13 -04008494 i40e_init_pf_fcoe(pf);
Vasu Dev38e00432014-08-01 13:27:03 -07008495
8496#endif /* I40E_FCOE */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008497#ifdef CONFIG_PCI_IOV
Shannon Nelsonba252f132014-12-11 07:06:34 +00008498 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008499 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
8500 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
8501 pf->num_req_vfs = min_t(int,
8502 pf->hw.func_caps.num_vfs,
8503 I40E_MAX_VF_COUNT);
8504 }
8505#endif /* CONFIG_PCI_IOV */
Anjali Singhai Jaind502ce02015-06-05 12:20:26 -04008506 if (pf->hw.mac.type == I40E_MAC_X722) {
8507 pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
8508 I40E_FLAG_128_QP_RSS_CAPABLE |
8509 I40E_FLAG_HW_ATR_EVICT_CAPABLE |
8510 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
8511 I40E_FLAG_WB_ON_ITR_CAPABLE |
Singhai, Anjali6a899022015-12-14 12:21:18 -08008512 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
Jesse Brandeburg8e2cc0e2016-02-29 11:00:24 -08008513 I40E_FLAG_NO_PCI_LINK_CHECK |
Neerav Parikhf1bbad32016-01-13 16:51:39 -08008514 I40E_FLAG_USE_SET_LLDP_MIB |
Singhai, Anjali6a899022015-12-14 12:21:18 -08008515 I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
Anjali Singhaia340c782016-01-06 11:49:28 -08008516 } else if ((pf->hw.aq.api_maj_ver > 1) ||
8517 ((pf->hw.aq.api_maj_ver == 1) &&
8518 (pf->hw.aq.api_min_ver > 4))) {
8519 /* Supported in FW API version higher than 1.4 */
8520 pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
Anjali Singhai Jain72b74862016-01-08 17:50:21 -08008521 pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8522 } else {
8523 pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
Anjali Singhai Jaind502ce02015-06-05 12:20:26 -04008524 }
Anjali Singhaia340c782016-01-06 11:49:28 -08008525
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008526 pf->eeprom_version = 0xDEAD;
8527 pf->lan_veb = I40E_NO_VEB;
8528 pf->lan_vsi = I40E_NO_VSI;
8529
Anjali Singhai Jaind1a8d272015-07-23 16:54:40 -04008530 /* By default FW has this off for performance reasons */
8531 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
8532
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008533 /* set up queue assignment tracking */
8534 size = sizeof(struct i40e_lump_tracking)
8535 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
8536 pf->qp_pile = kzalloc(size, GFP_KERNEL);
8537 if (!pf->qp_pile) {
8538 err = -ENOMEM;
8539 goto sw_init_done;
8540 }
8541 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
8542 pf->qp_pile->search_hint = 0;
8543
Anjali Singhai Jain327fe042014-06-04 01:23:26 +00008544 pf->tx_timeout_recovery_level = 1;
8545
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008546 mutex_init(&pf->switch_mutex);
8547
Greg Rosec668a122015-02-26 16:10:39 +00008548 /* If NPAR is enabled nudge the Tx scheduler */
8549 if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
8550 i40e_set_npar_bw_setting(pf);
8551
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008552sw_init_done:
8553 return err;
8554}
8555
8556/**
Anjali Singhai Jain7c3c2882014-02-14 02:14:38 +00008557 * i40e_set_ntuple - set the ntuple feature flag and take action
8558 * @pf: board private structure to initialize
8559 * @features: the feature set that the stack is suggesting
8560 *
 * Returns true if a reset needs to happen, false otherwise
8562 **/
8563bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
8564{
8565 bool need_reset = false;
8566
8567 /* Check if Flow Director n-tuple support was enabled or disabled. If
8568 * the state changed, we need to reset.
8569 */
8570 if (features & NETIF_F_NTUPLE) {
8571 /* Enable filters and mark for reset */
8572 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
8573 need_reset = true;
		/* enable FD_SB only if there is an MSI-X vector available */
8575 if (pf->num_fdsb_msix > 0)
8576 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
Anjali Singhai Jain7c3c2882014-02-14 02:14:38 +00008577 } else {
8578 /* turn off filters, mark for reset and clear SW filter list */
8579 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8580 need_reset = true;
8581 i40e_fdir_filter_exit(pf);
8582 }
8583 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
Anjali Singhai Jain8a4f34f2014-06-04 08:45:20 +00008584 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +00008585 /* reset fd counters */
8586 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
8587 pf->fdir_pf_active_filters = 0;
Anjali Singhai Jain8a4f34f2014-06-04 08:45:20 +00008588 /* if ATR was auto disabled it can be re-enabled. */
8589 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
Jacob Keller234dc4e2016-09-06 18:05:09 -07008590 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
Anjali Singhai Jain8a4f34f2014-06-04 08:45:20 +00008591 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
Jacob Keller234dc4e2016-09-06 18:05:09 -07008592 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8593 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
8594 }
Anjali Singhai Jain7c3c2882014-02-14 02:14:38 +00008595 }
8596 return need_reset;
8597}
8598
8599/**
Alan Bradyd8ec9862016-07-27 12:02:38 -07008600 * i40e_clear_rss_lut - clear the rx hash lookup table
8601 * @vsi: the VSI being configured
8602 **/
8603static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
8604{
8605 struct i40e_pf *pf = vsi->back;
8606 struct i40e_hw *hw = &pf->hw;
8607 u16 vf_id = vsi->vf_id;
8608 u8 i;
8609
8610 if (vsi->type == I40E_VSI_MAIN) {
8611 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8612 wr32(hw, I40E_PFQF_HLUT(i), 0);
8613 } else if (vsi->type == I40E_VSI_SRIOV) {
8614 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
8615 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
8616 } else {
8617 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
8618 }
8619}
8620
8621/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008622 * i40e_set_features - set the netdev feature flags
8623 * @netdev: ptr to the netdev being adjusted
8624 * @features: the feature set that the stack is suggesting
8625 **/
8626static int i40e_set_features(struct net_device *netdev,
8627 netdev_features_t features)
8628{
8629 struct i40e_netdev_priv *np = netdev_priv(netdev);
8630 struct i40e_vsi *vsi = np->vsi;
Anjali Singhai Jain7c3c2882014-02-14 02:14:38 +00008631 struct i40e_pf *pf = vsi->back;
8632 bool need_reset;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008633
Alan Bradyd8ec9862016-07-27 12:02:38 -07008634 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
8635 i40e_pf_config_rss(pf);
8636 else if (!(features & NETIF_F_RXHASH) &&
8637 netdev->features & NETIF_F_RXHASH)
8638 i40e_clear_rss_lut(vsi);
8639
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008640 if (features & NETIF_F_HW_VLAN_CTAG_RX)
8641 i40e_vlan_stripping_enable(vsi);
8642 else
8643 i40e_vlan_stripping_disable(vsi);
8644
Anjali Singhai Jain7c3c2882014-02-14 02:14:38 +00008645 need_reset = i40e_set_ntuple(pf, features);
8646
8647 if (need_reset)
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04008648 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
Anjali Singhai Jain7c3c2882014-02-14 02:14:38 +00008649
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008650 return 0;
8651}
8652
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008653/**
 * i40e_get_udp_port_idx - Look up the index of a possibly offloaded Rx UDP port
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008655 * @pf: board private structure
8656 * @port: The UDP port to look up
8657 *
8658 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
8659 **/
Singhai, Anjali6a899022015-12-14 12:21:18 -08008660static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008661{
8662 u8 i;
8663
8664 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
Singhai, Anjali6a899022015-12-14 12:21:18 -08008665 if (pf->udp_ports[i].index == port)
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008666 return i;
8667 }
8668
8669 return i;
8670}
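
/* Note on the lookup convention above: the helper does double duty.
 * Called with a real port it returns that port's slot, or
 * I40E_MAX_PF_UDP_OFFLOAD_PORTS when the port is not currently offloaded;
 * called with 0 it returns the first free slot, since unused entries keep
 * .index == 0.
 */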
8671
8672/**
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07008673 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008674 * @netdev: This physical port's netdev
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07008675 * @ti: Tunnel endpoint information
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008676 **/
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07008677static void i40e_udp_tunnel_add(struct net_device *netdev,
8678 struct udp_tunnel_info *ti)
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008679{
8680 struct i40e_netdev_priv *np = netdev_priv(netdev);
8681 struct i40e_vsi *vsi = np->vsi;
8682 struct i40e_pf *pf = vsi->back;
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07008683 __be16 port = ti->port;
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008684 u8 next_idx;
8685 u8 idx;
8686
Singhai, Anjali6a899022015-12-14 12:21:18 -08008687 idx = i40e_get_udp_port_idx(pf, port);
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008688
8689 /* Check if port already exists */
8690 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07008691 netdev_info(netdev, "port %d already offloaded\n",
Shannon Nelsonc22c06c2015-03-31 00:45:04 -07008692 ntohs(port));
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008693 return;
8694 }
8695
8696 /* Now check if there is space to add the new port */
Singhai, Anjali6a899022015-12-14 12:21:18 -08008697 next_idx = i40e_get_udp_port_idx(pf, 0);
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008698
8699 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07008700 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008701 ntohs(port));
8702 return;
8703 }
8704
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07008705 switch (ti->type) {
8706 case UDP_TUNNEL_TYPE_VXLAN:
8707 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
8708 break;
8709 case UDP_TUNNEL_TYPE_GENEVE:
8710 if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
8711 return;
8712 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
8713 break;
8714 default:
8715 return;
8716 }
8717
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008718 /* New port: add it and mark its index in the bitmap */
Singhai, Anjali6a899022015-12-14 12:21:18 -08008719 pf->udp_ports[next_idx].index = port;
Singhai, Anjali6a899022015-12-14 12:21:18 -08008720 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
8721 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008722}
8723
8724/**
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07008725 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008726 * @netdev: This physical port's netdev
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07008727 * @ti: Tunnel endpoint information
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008728 **/
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07008729static void i40e_udp_tunnel_del(struct net_device *netdev,
8730 struct udp_tunnel_info *ti)
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008731{
8732 struct i40e_netdev_priv *np = netdev_priv(netdev);
8733 struct i40e_vsi *vsi = np->vsi;
8734 struct i40e_pf *pf = vsi->back;
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07008735 __be16 port = ti->port;
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008736 u8 idx;
8737
Singhai, Anjali6a899022015-12-14 12:21:18 -08008738 idx = i40e_get_udp_port_idx(pf, port);
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008739
8740 /* Check if port already exists */
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07008741 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
8742 goto not_found;
Jeff Kirshera1c9a9d2013-12-28 07:32:18 +00008743
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07008744 switch (ti->type) {
8745 case UDP_TUNNEL_TYPE_VXLAN:
8746 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
8747 goto not_found;
8748 break;
8749 case UDP_TUNNEL_TYPE_GENEVE:
8750 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
8751 goto not_found;
8752 break;
8753 default:
8754 goto not_found;
Singhai, Anjali6a899022015-12-14 12:21:18 -08008755 }
8756
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07008757 /* if port exists, set it to 0 (mark for deletion)
8758 * and make it pending
8759 */
8760 pf->udp_ports[idx].index = 0;
8761 pf->pending_udp_bitmap |= BIT_ULL(idx);
Singhai, Anjali6a899022015-12-14 12:21:18 -08008762 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8763
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07008764 return;
8765not_found:
8766 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
8767 ntohs(port));
Singhai, Anjali6a899022015-12-14 12:21:18 -08008768}
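
/* Illustrative sketch, not part of the driver: the two ndo handlers above
 * only record work in pf->pending_udp_bitmap and raise
 * I40E_FLAG_UDP_FILTER_SYNC; a service task elsewhere in this file is
 * expected to consume that state and push the adds/deletes to firmware.
 * A minimal walk of the pending state, with the firmware call left out,
 * could look like this (the i40e_example_* name is ours):
 */
static void __maybe_unused i40e_example_sync_udp_ports(struct i40e_pf *pf)
{
	u8 i;

	if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (!(pf->pending_udp_bitmap & BIT_ULL(i)))
			continue;
		pf->pending_udp_bitmap &= ~BIT_ULL(i);

		/* index == 0 means "delete slot i", otherwise "add port" */
		dev_dbg(&pf->pdev->dev, "sync UDP slot %u, port %u\n",
			i, ntohs(pf->udp_ports[i].index));
	}
}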
8769
Neerav Parikh1f224ad2014-02-12 01:45:31 +00008770static int i40e_get_phys_port_id(struct net_device *netdev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01008771 struct netdev_phys_item_id *ppid)
Neerav Parikh1f224ad2014-02-12 01:45:31 +00008772{
8773 struct i40e_netdev_priv *np = netdev_priv(netdev);
8774 struct i40e_pf *pf = np->vsi->back;
8775 struct i40e_hw *hw = &pf->hw;
8776
8777 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
8778 return -EOPNOTSUPP;
8779
8780 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
8781 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
8782
8783 return 0;
8784}
8785
Jesse Brandeburg2f90ade2014-11-20 16:30:02 -08008786/**
8787 * i40e_ndo_fdb_add - add an entry to the hardware database
8788 * @ndm: the input from the stack
8789 * @tb: pointer to array of nladdr (unused)
8790 * @dev: the net device pointer
8791 * @addr: the MAC address entry being added
8792 * @flags: instructions from stack about fdb operation
8793 */
Greg Rose4ba0dea2014-03-06 08:59:55 +00008794static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
8795 struct net_device *dev,
Jiri Pirkof6f64242014-11-28 14:34:15 +01008796 const unsigned char *addr, u16 vid,
Greg Rose4ba0dea2014-03-06 08:59:55 +00008797 u16 flags)
Greg Rose4ba0dea2014-03-06 08:59:55 +00008798{
8799 struct i40e_netdev_priv *np = netdev_priv(dev);
8800 struct i40e_pf *pf = np->vsi->back;
8801 int err = 0;
8802
8803 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
8804 return -EOPNOTSUPP;
8805
Or Gerlitz65891fe2014-12-14 18:19:05 +02008806 if (vid) {
8807 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
8808 return -EINVAL;
8809 }
8810
	/* Hardware does not support aging addresses, so if an
	 * ndm_state is given, only allow permanent addresses
	 */
8814 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
8815 netdev_info(dev, "FDB only supports static addresses\n");
8816 return -EINVAL;
8817 }
8818
8819 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
8820 err = dev_uc_add_excl(dev, addr);
8821 else if (is_multicast_ether_addr(addr))
8822 err = dev_mc_add_excl(dev, addr);
8823 else
8824 err = -EINVAL;
8825
8826 /* Only return duplicate errors if NLM_F_EXCL is set */
8827 if (err == -EEXIST && !(flags & NLM_F_EXCL))
8828 err = 0;
8829
8830 return err;
8831}
8832
Neerav Parikh51616012015-02-06 08:52:14 +00008833/**
8834 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
8835 * @dev: the netdev being configured
8836 * @nlh: RTNL message
8837 *
8838 * Inserts a new hardware bridge if not already created and
8839 * enables the bridging mode requested (VEB or VEPA). If the
8840 * hardware bridge has already been inserted and the request
8841 * is to change the mode then that requires a PF reset to
8842 * allow rebuild of the components with required hardware
8843 * bridge mode enabled.
8844 **/
8845static int i40e_ndo_bridge_setlink(struct net_device *dev,
Carolyn Wyborny9df70b62015-04-27 14:57:11 -04008846 struct nlmsghdr *nlh,
8847 u16 flags)
Neerav Parikh51616012015-02-06 08:52:14 +00008848{
8849 struct i40e_netdev_priv *np = netdev_priv(dev);
8850 struct i40e_vsi *vsi = np->vsi;
8851 struct i40e_pf *pf = vsi->back;
8852 struct i40e_veb *veb = NULL;
8853 struct nlattr *attr, *br_spec;
8854 int i, rem;
8855
8856 /* Only for PF VSI for now */
8857 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8858 return -EOPNOTSUPP;
8859
8860 /* Find the HW bridge for PF VSI */
8861 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8862 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8863 veb = pf->veb[i];
8864 }
8865
	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
8869 __u16 mode;
8870
8871 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8872 continue;
8873
8874 mode = nla_get_u16(attr);
8875 if ((mode != BRIDGE_MODE_VEPA) &&
8876 (mode != BRIDGE_MODE_VEB))
8877 return -EINVAL;
8878
8879 /* Insert a new HW bridge */
8880 if (!veb) {
8881 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8882 vsi->tc_config.enabled_tc);
8883 if (veb) {
8884 veb->bridge_mode = mode;
8885 i40e_config_bridge_mode(veb);
8886 } else {
8887 /* No Bridge HW offload available */
8888 return -ENOENT;
8889 }
8890 break;
8891 } else if (mode != veb->bridge_mode) {
8892 /* Existing HW bridge but different mode needs reset */
8893 veb->bridge_mode = mode;
Anjali Singhai Jainfc608612015-05-08 15:35:57 -07008894 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
8895 if (mode == BRIDGE_MODE_VEB)
8896 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
8897 else
8898 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8899 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
Neerav Parikh51616012015-02-06 08:52:14 +00008900 break;
8901 }
8902 }
8903
8904 return 0;
8905}
8906
8907/**
8908 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
8909 * @skb: skb buff
8910 * @pid: process id
8911 * @seq: RTNL message seq #
8912 * @dev: the netdev being configured
8913 * @filter_mask: unused
Jesse Brandeburgd4b2f9f2015-09-03 17:18:48 -04008914 * @nlflags: netlink flags passed in
Neerav Parikh51616012015-02-06 08:52:14 +00008915 *
8916 * Return the mode in which the hardware bridge is operating in
 * i.e. VEB or VEPA.
8918 **/
Neerav Parikh51616012015-02-06 08:52:14 +00008919static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8920 struct net_device *dev,
Carolyn Wyborny9f4ffc42015-08-31 19:54:42 -04008921 u32 __always_unused filter_mask,
8922 int nlflags)
Neerav Parikh51616012015-02-06 08:52:14 +00008923{
8924 struct i40e_netdev_priv *np = netdev_priv(dev);
8925 struct i40e_vsi *vsi = np->vsi;
8926 struct i40e_pf *pf = vsi->back;
8927 struct i40e_veb *veb = NULL;
8928 int i;
8929
8930 /* Only for PF VSI for now */
8931 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8932 return -EOPNOTSUPP;
8933
8934 /* Find the HW bridge for the PF VSI */
8935 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8936 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8937 veb = pf->veb[i];
8938 }
8939
8940 if (!veb)
8941 return 0;
8942
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02008943 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
Huaibin Wang599b0762016-09-26 09:51:18 +02008944 0, 0, nlflags, filter_mask, NULL);
Neerav Parikh51616012015-02-06 08:52:14 +00008945}
Neerav Parikh51616012015-02-06 08:52:14 +00008946
Singhai, Anjali6a899022015-12-14 12:21:18 -08008947/* Hardware supports L4 tunnel length of 128B (=2^7) which includes
 * the inner MAC header plus all inner ethertypes.
8949 */
8950#define I40E_MAX_TUNNEL_HDR_LEN 128
Joe Stringerf44a75e2015-04-14 17:09:14 -07008951/**
8952 * i40e_features_check - Validate encapsulated packet conforms to limits
8953 * @skb: skb buff
Jean Sacren2bc11c62015-09-19 05:08:43 -06008954 * @dev: This physical port's netdev
Joe Stringerf44a75e2015-04-14 17:09:14 -07008955 * @features: Offload features that the stack believes apply
8956 **/
8957static netdev_features_t i40e_features_check(struct sk_buff *skb,
8958 struct net_device *dev,
8959 netdev_features_t features)
8960{
8961 if (skb->encapsulation &&
Singhai, Anjali6a899022015-12-14 12:21:18 -08008962 ((skb_inner_network_header(skb) - skb_transport_header(skb)) >
Joe Stringerf44a75e2015-04-14 17:09:14 -07008963 I40E_MAX_TUNNEL_HDR_LEN))
Tom Herberta1882222015-12-14 11:19:43 -08008964 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
Joe Stringerf44a75e2015-04-14 17:09:14 -07008965
8966 return features;
8967}
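
/* Worked example for the limit above: a VXLAN-in-IPv4 frame carries
 * 8 bytes of outer UDP + 8 bytes of VXLAN + 14 bytes of inner Ethernet
 * between the (outer) transport header and the inner network header,
 * i.e. about 30 bytes, comfortably inside I40E_MAX_TUNNEL_HDR_LEN.
 * Deeply stacked or heavily optioned encapsulations can exceed 128 bytes,
 * at which point the checksum and GSO offloads are masked off.
 */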
8968
Shannon Nelson37a29732015-02-27 09:15:19 +00008969static const struct net_device_ops i40e_netdev_ops = {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008970 .ndo_open = i40e_open,
8971 .ndo_stop = i40e_close,
8972 .ndo_start_xmit = i40e_lan_xmit_frame,
8973 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
8974 .ndo_set_rx_mode = i40e_set_rx_mode,
8975 .ndo_validate_addr = eth_validate_addr,
8976 .ndo_set_mac_address = i40e_set_mac,
8977 .ndo_change_mtu = i40e_change_mtu,
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00008978 .ndo_do_ioctl = i40e_ioctl,
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008979 .ndo_tx_timeout = i40e_tx_timeout,
8980 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
8981 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
8982#ifdef CONFIG_NET_POLL_CONTROLLER
8983 .ndo_poll_controller = i40e_netpoll,
8984#endif
John Fastabende4c67342016-02-16 21:16:15 -08008985 .ndo_setup_tc = __i40e_setup_tc,
Vasu Dev38e00432014-08-01 13:27:03 -07008986#ifdef I40E_FCOE
8987 .ndo_fcoe_enable = i40e_fcoe_enable,
8988 .ndo_fcoe_disable = i40e_fcoe_disable,
8989#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008990 .ndo_set_features = i40e_set_features,
8991 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
8992 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04008993 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00008994 .ndo_get_vf_config = i40e_ndo_get_vf_config,
Mitch Williams588aefa2014-02-11 08:27:49 +00008995 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
Serey Konge6d90042014-07-12 07:28:14 +00008996 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
Anjali Singhai Jainc3bbbd22016-04-01 03:56:07 -07008997 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
Alexander Duyck06a5f7f2016-06-16 12:22:06 -07008998 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
8999 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
Neerav Parikh1f224ad2014-02-12 01:45:31 +00009000 .ndo_get_phys_port_id = i40e_get_phys_port_id,
Greg Rose4ba0dea2014-03-06 08:59:55 +00009001 .ndo_fdb_add = i40e_ndo_fdb_add,
Joe Stringerf44a75e2015-04-14 17:09:14 -07009002 .ndo_features_check = i40e_features_check,
Neerav Parikh51616012015-02-06 08:52:14 +00009003 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
9004 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009005};
9006
9007/**
9008 * i40e_config_netdev - Setup the netdev flags
9009 * @vsi: the VSI being configured
9010 *
9011 * Returns 0 on success, negative value on failure
9012 **/
9013static int i40e_config_netdev(struct i40e_vsi *vsi)
9014{
9015 struct i40e_pf *pf = vsi->back;
9016 struct i40e_hw *hw = &pf->hw;
9017 struct i40e_netdev_priv *np;
9018 struct net_device *netdev;
9019 u8 mac_addr[ETH_ALEN];
9020 int etherdev_size;
9021
9022 etherdev_size = sizeof(struct i40e_netdev_priv);
Anjali Singhai Jainf8ff1462013-11-26 10:49:19 +00009023 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009024 if (!netdev)
9025 return -ENOMEM;
9026
9027 vsi->netdev = netdev;
9028 np = netdev_priv(netdev);
9029 np->vsi = vsi;
9030
Alexander Duyckb0fe3302016-04-02 00:05:14 -07009031 netdev->hw_enc_features |= NETIF_F_SG |
9032 NETIF_F_IP_CSUM |
9033 NETIF_F_IPV6_CSUM |
9034 NETIF_F_HIGHDMA |
9035 NETIF_F_SOFT_FEATURES |
9036 NETIF_F_TSO |
9037 NETIF_F_TSO_ECN |
9038 NETIF_F_TSO6 |
9039 NETIF_F_GSO_GRE |
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04009040 NETIF_F_GSO_GRE_CSUM |
Tom Herbert7e133182016-05-18 09:06:10 -07009041 NETIF_F_GSO_IPXIP4 |
Alexander Duyckbf2d1df2016-05-18 10:44:53 -07009042 NETIF_F_GSO_IPXIP6 |
Alexander Duyckb0fe3302016-04-02 00:05:14 -07009043 NETIF_F_GSO_UDP_TUNNEL |
9044 NETIF_F_GSO_UDP_TUNNEL_CSUM |
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04009045 NETIF_F_GSO_PARTIAL |
Alexander Duyckb0fe3302016-04-02 00:05:14 -07009046 NETIF_F_SCTP_CRC |
9047 NETIF_F_RXHASH |
9048 NETIF_F_RXCSUM |
Jesse Brandeburg5afdaaa2015-12-10 11:38:50 -08009049 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009050
Alexander Duyckb0fe3302016-04-02 00:05:14 -07009051 if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04009052 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
9053
9054 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
Alexander Duyckb0fe3302016-04-02 00:05:14 -07009055
9056 /* record features VLANs can make use of */
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04009057 netdev->vlan_features |= netdev->hw_enc_features |
9058 NETIF_F_TSO_MANGLEID;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009059
Anjali Singhai Jain2e86a0b2014-04-01 07:11:53 +00009060 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
Alexander Duyckb0fe3302016-04-02 00:05:14 -07009061 netdev->hw_features |= NETIF_F_NTUPLE;
Anjali Singhai Jain2e86a0b2014-04-01 07:11:53 +00009062
Alexander Duyckb0fe3302016-04-02 00:05:14 -07009063 netdev->hw_features |= netdev->hw_enc_features |
9064 NETIF_F_HW_VLAN_CTAG_TX |
9065 NETIF_F_HW_VLAN_CTAG_RX;
9066
9067 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04009068 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009069
9070 if (vsi->type == I40E_VSI_MAIN) {
9071 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
Greg Rose9a173902014-05-22 06:32:02 +00009072 ether_addr_copy(mac_addr, hw->mac.perm_addr);
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07009073 spin_lock_bh(&vsi->mac_filter_list_lock);
Jacob Keller1bc87e82016-10-05 09:30:31 -07009074 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07009075 spin_unlock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009076 } else {
9077 /* relate the VSI_VMDQ name to the VSI_MAIN name */
9078 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
9079 pf->vsi[pf->lan_vsi]->netdev->name);
9080 random_ether_addr(mac_addr);
Kiran Patil21659032015-09-30 14:09:03 -04009081
9082 spin_lock_bh(&vsi->mac_filter_list_lock);
Jacob Keller1bc87e82016-10-05 09:30:31 -07009083 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
Kiran Patil21659032015-09-30 14:09:03 -04009084 spin_unlock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009085 }
Kiran Patil21659032015-09-30 14:09:03 -04009086
Greg Rose9a173902014-05-22 06:32:02 +00009087 ether_addr_copy(netdev->dev_addr, mac_addr);
9088 ether_addr_copy(netdev->perm_addr, mac_addr);
Alexander Duyckb0fe3302016-04-02 00:05:14 -07009089
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009090 netdev->priv_flags |= IFF_UNICAST_FLT;
9091 netdev->priv_flags |= IFF_SUPP_NOFCS;
9092 /* Setup netdev TC information */
9093 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
9094
9095 netdev->netdev_ops = &i40e_netdev_ops;
9096 netdev->watchdog_timeo = 5 * HZ;
9097 i40e_set_ethtool_ops(netdev);
Vasu Dev38e00432014-08-01 13:27:03 -07009098#ifdef I40E_FCOE
9099 i40e_fcoe_config_netdev(netdev, vsi);
9100#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009101
Jarod Wilson91c527a2016-10-17 15:54:05 -04009102 /* MTU range: 68 - 9706 */
9103 netdev->min_mtu = ETH_MIN_MTU;
9104 netdev->max_mtu = I40E_MAX_RXBUFFER -
9105 (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9106
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009107 return 0;
9108}
9109
9110/**
9111 * i40e_vsi_delete - Delete a VSI from the switch
9112 * @vsi: the VSI being removed
 **/
9116static void i40e_vsi_delete(struct i40e_vsi *vsi)
9117{
	/* removing the default VSI is not allowed */
9119 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
9120 return;
9121
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009122 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009123}
9124
9125/**
Neerav Parikh51616012015-02-06 08:52:14 +00009126 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
9127 * @vsi: the VSI being queried
9128 *
 * Returns 1 if the HW bridge mode is VEB, 0 for VEPA mode, and -ENOENT
 * when no VEB is associated with the uplink bridge
9130 **/
9131int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
9132{
9133 struct i40e_veb *veb;
9134 struct i40e_pf *pf = vsi->back;
9135
9136 /* Uplink is not a bridge so default to VEB */
9137 if (vsi->veb_idx == I40E_NO_VEB)
9138 return 1;
9139
9140 veb = pf->veb[vsi->veb_idx];
Akeem G Abodunrin09603ea2015-10-01 14:37:36 -04009141 if (!veb) {
9142 dev_info(&pf->pdev->dev,
9143 "There is no veb associated with the bridge\n");
9144 return -ENOENT;
9145 }
Neerav Parikh51616012015-02-06 08:52:14 +00009146
	/* Uplink is a bridge in VEPA mode */
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		return 0;

	/* Uplink is a bridge in VEB mode */
	return 1;
}
9158
9159/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009160 * i40e_add_vsi - Add a VSI to the switch
9161 * @vsi: the VSI being configured
9162 *
9163 * This initializes a VSI context depending on the VSI type to be added and
9164 * passes it down to the add_vsi aq command.
9165 **/
9166static int i40e_add_vsi(struct i40e_vsi *vsi)
9167{
9168 int ret = -ENODEV;
Kiran Patilf6bd0962016-06-20 09:10:34 -07009169 i40e_status aq_ret = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009170 struct i40e_pf *pf = vsi->back;
9171 struct i40e_hw *hw = &pf->hw;
9172 struct i40e_vsi_context ctxt;
Kiran Patil21659032015-09-30 14:09:03 -04009173 struct i40e_mac_filter *f, *ftmp;
9174
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009175 u8 enabled_tc = 0x1; /* TC0 enabled */
9176 int f_count = 0;
9177
9178 memset(&ctxt, 0, sizeof(ctxt));
9179 switch (vsi->type) {
9180 case I40E_VSI_MAIN:
9181 /* The PF's main VSI is already setup as part of the
9182 * device initialization, so we'll not bother with
9183 * the add_vsi call, but we will retrieve the current
9184 * VSI context.
9185 */
9186 ctxt.seid = pf->main_vsi_seid;
9187 ctxt.pf_num = pf->hw.pf_id;
9188 ctxt.vf_num = 0;
9189 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9190 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9191 if (ret) {
9192 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04009193 "couldn't get PF vsi config, err %s aq_err %s\n",
9194 i40e_stat_str(&pf->hw, ret),
9195 i40e_aq_str(&pf->hw,
9196 pf->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009197 return -ENOENT;
9198 }
Jesse Brandeburg1a2f6242015-03-31 00:45:01 -07009199 vsi->info = ctxt.info;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009200 vsi->info.valid_sections = 0;
9201
9202 vsi->seid = ctxt.seid;
9203 vsi->id = ctxt.vsi_number;
9204
9205 enabled_tc = i40e_pf_get_tc_map(pf);
9206
9207 /* MFP mode setup queue map and update VSI */
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00009208 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
9209 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009210 memset(&ctxt, 0, sizeof(ctxt));
9211 ctxt.seid = pf->main_vsi_seid;
9212 ctxt.pf_num = pf->hw.pf_id;
9213 ctxt.vf_num = 0;
9214 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
9215 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
9216 if (ret) {
9217 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04009218 "update vsi failed, err %s aq_err %s\n",
9219 i40e_stat_str(&pf->hw, ret),
9220 i40e_aq_str(&pf->hw,
9221 pf->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009222 ret = -ENOENT;
9223 goto err;
9224 }
9225 /* update the local VSI info queue map */
9226 i40e_vsi_update_queue_map(vsi, &ctxt);
9227 vsi->info.valid_sections = 0;
9228 } else {
9229 /* Default/Main VSI is only enabled for TC0
9230 * reconfigure it to enable all TCs that are
9231 * available on the port in SFP mode.
Neerav Parikh63d7e5a2014-12-14 01:55:16 +00009232 * For MFP case the iSCSI PF would use this
9233 * flow to enable LAN+iSCSI TC.
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009234 */
9235 ret = i40e_vsi_config_tc(vsi, enabled_tc);
9236 if (ret) {
9237 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04009238 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
9239 enabled_tc,
9240 i40e_stat_str(&pf->hw, ret),
9241 i40e_aq_str(&pf->hw,
9242 pf->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009243 ret = -ENOENT;
9244 }
9245 }
9246 break;
9247
9248 case I40E_VSI_FDIR:
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08009249 ctxt.pf_num = hw->pf_id;
9250 ctxt.vf_num = 0;
9251 ctxt.uplink_seid = vsi->uplink_seid;
Neerav Parikh2b18e592015-01-24 09:58:38 +00009252 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08009253 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
Anjali Singhai Jainfc608612015-05-08 15:35:57 -07009254 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
9255 (i40e_is_vsi_uplink_mode_veb(vsi))) {
Neerav Parikh51616012015-02-06 08:52:14 +00009256 ctxt.info.valid_sections |=
Anjali Singhai Jainfc608612015-05-08 15:35:57 -07009257 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
Neerav Parikh51616012015-02-06 08:52:14 +00009258 ctxt.info.switch_id =
Anjali Singhai Jainfc608612015-05-08 15:35:57 -07009259 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
Neerav Parikh51616012015-02-06 08:52:14 +00009260 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009261 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009262 break;
9263
9264 case I40E_VSI_VMDQ2:
9265 ctxt.pf_num = hw->pf_id;
9266 ctxt.vf_num = 0;
9267 ctxt.uplink_seid = vsi->uplink_seid;
Neerav Parikh2b18e592015-01-24 09:58:38 +00009268 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009269 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
9270
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009271 /* This VSI is connected to VEB so the switch_id
9272 * should be set to zero by default.
9273 */
Neerav Parikh51616012015-02-06 08:52:14 +00009274 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9275 ctxt.info.valid_sections |=
9276 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9277 ctxt.info.switch_id =
9278 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9279 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009280
9281 /* Setup the VSI tx/rx queue map for TC0 only for now */
9282 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9283 break;
9284
9285 case I40E_VSI_SRIOV:
9286 ctxt.pf_num = hw->pf_id;
9287 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
9288 ctxt.uplink_seid = vsi->uplink_seid;
Neerav Parikh2b18e592015-01-24 09:58:38 +00009289 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009290 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
9291
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009292 /* This VSI is connected to VEB so the switch_id
9293 * should be set to zero by default.
9294 */
Neerav Parikh51616012015-02-06 08:52:14 +00009295 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9296 ctxt.info.valid_sections |=
9297 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9298 ctxt.info.switch_id =
9299 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9300 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009301
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06009302 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
9303 ctxt.info.valid_sections |=
9304 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
9305 ctxt.info.queueing_opt_flags |=
Ashish Shah4b28cdb2016-05-03 15:13:17 -07009306 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
9307 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06009308 }
9309
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009310 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
9311 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
Mitch Williamsc674d122014-05-20 08:01:40 +00009312 if (pf->vf[vsi->vf_id].spoofchk) {
9313 ctxt.info.valid_sections |=
9314 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
9315 ctxt.info.sec_flags |=
9316 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
9317 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
9318 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009319 /* Setup the VSI tx/rx queue map for TC0 only for now */
9320 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9321 break;
9322
Vasu Dev38e00432014-08-01 13:27:03 -07009323#ifdef I40E_FCOE
9324 case I40E_VSI_FCOE:
9325 ret = i40e_fcoe_vsi_init(vsi, &ctxt);
9326 if (ret) {
9327 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
9328 return ret;
9329 }
9330 break;
9331
9332#endif /* I40E_FCOE */
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -06009333 case I40E_VSI_IWARP:
9334 /* send down message to iWARP */
9335 break;
9336
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009337 default:
9338 return -ENODEV;
9339 }
9340
9341 if (vsi->type != I40E_VSI_MAIN) {
9342 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
9343 if (ret) {
9344 dev_info(&vsi->back->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04009345 "add vsi failed, err %s aq_err %s\n",
9346 i40e_stat_str(&pf->hw, ret),
9347 i40e_aq_str(&pf->hw,
9348 pf->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009349 ret = -ENOENT;
9350 goto err;
9351 }
Jesse Brandeburg1a2f6242015-03-31 00:45:01 -07009352 vsi->info = ctxt.info;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009353 vsi->info.valid_sections = 0;
9354 vsi->seid = ctxt.seid;
9355 vsi->id = ctxt.vsi_number;
9356 }
Kiran Patilf6bd0962016-06-20 09:10:34 -07009357	/* Except for the FDIR VSI, set the broadcast filter on all other VSIs */
9358 if (vsi->type != I40E_VSI_FDIR) {
9359 aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
9360 if (aq_ret) {
9361 ret = i40e_aq_rc_to_posix(aq_ret,
9362 hw->aq.asq_last_status);
9363 dev_info(&pf->pdev->dev,
9364 "set brdcast promisc failed, err %s, aq_err %s\n",
9365 i40e_stat_str(hw, aq_ret),
9366 i40e_aq_str(hw, hw->aq.asq_last_status));
9367 }
9368 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009369
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07009370 vsi->active_filters = 0;
9371 clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
Kiran Patil21659032015-09-30 14:09:03 -04009372 spin_lock_bh(&vsi->mac_filter_list_lock);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009373 /* If macvlan filters already exist, force them to get loaded */
9374 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
Mitch Williamsc3c7ea22016-06-20 09:10:38 -07009375 f->state = I40E_FILTER_NEW;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009376 f_count++;
9377 }
Kiran Patil21659032015-09-30 14:09:03 -04009378 spin_unlock_bh(&vsi->mac_filter_list_lock);
9379
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009380 if (f_count) {
9381 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
9382 pf->flags |= I40E_FLAG_FILTER_SYNC;
9383 }
9384
9385 /* Update VSI BW information */
9386 ret = i40e_vsi_get_bw_info(vsi);
9387 if (ret) {
9388 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04009389 "couldn't get vsi bw info, err %s aq_err %s\n",
9390 i40e_stat_str(&pf->hw, ret),
9391 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009392 /* VSI is already added so not tearing that up */
9393 ret = 0;
9394 }
9395
9396err:
9397 return ret;
9398}
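
/* Illustrative sketch only: i40e_example_allow_veb_loopback() is a
 * hypothetical helper, not part of this driver.  It shows the pattern the
 * FDIR, VMDQ2 and SRIOV cases above all repeat: when the VSI's uplink is
 * a VEB, mark the switch section valid and allow loopback on that VEB.
 */
static inline void i40e_example_allow_veb_loopback(struct i40e_vsi *vsi,
						   struct i40e_vsi_context *ctxt)
{
	if (!i40e_is_vsi_uplink_mode_veb(vsi))
		return;

	ctxt->info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt->info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
}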
9399
9400/**
9401 * i40e_vsi_release - Delete a VSI and free its resources
9402 * @vsi: the VSI being removed
9403 *
9404 * Returns 0 on success or < 0 on error
9405 **/
9406int i40e_vsi_release(struct i40e_vsi *vsi)
9407{
9408 struct i40e_mac_filter *f, *ftmp;
9409 struct i40e_veb *veb = NULL;
9410 struct i40e_pf *pf;
9411 u16 uplink_seid;
9412 int i, n;
9413
9414 pf = vsi->back;
9415
9416 /* release of a VEB-owner or last VSI is not allowed */
9417 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
9418 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
9419 vsi->seid, vsi->uplink_seid);
9420 return -ENODEV;
9421 }
9422 if (vsi == pf->vsi[pf->lan_vsi] &&
9423 !test_bit(__I40E_DOWN, &pf->state)) {
9424 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
9425 return -ENODEV;
9426 }
9427
9428 uplink_seid = vsi->uplink_seid;
9429 if (vsi->type != I40E_VSI_SRIOV) {
9430 if (vsi->netdev_registered) {
9431 vsi->netdev_registered = false;
9432 if (vsi->netdev) {
9433 /* results in a call to i40e_close() */
9434 unregister_netdev(vsi->netdev);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009435 }
9436 } else {
Shannon Nelson90ef8d42014-03-14 07:32:26 +00009437 i40e_vsi_close(vsi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009438 }
9439 i40e_vsi_disable_irq(vsi);
9440 }
9441
Kiran Patil21659032015-09-30 14:09:03 -04009442 spin_lock_bh(&vsi->mac_filter_list_lock);
Jacob Keller6622f5c2016-10-05 09:30:32 -07009443
9444 /* clear the sync flag on all filters */
9445 if (vsi->netdev) {
9446 __dev_uc_unsync(vsi->netdev, NULL);
9447 __dev_mc_unsync(vsi->netdev, NULL);
9448 }
9449
9450 /* make sure any remaining filters are marked for deletion */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009451 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
Jacob Keller1bc87e82016-10-05 09:30:31 -07009452 i40e_del_filter(vsi, f->macaddr, f->vlan);
Jacob Keller6622f5c2016-10-05 09:30:32 -07009453
Kiran Patil21659032015-09-30 14:09:03 -04009454 spin_unlock_bh(&vsi->mac_filter_list_lock);
9455
Jesse Brandeburg17652c62015-11-05 17:01:02 -08009456 i40e_sync_vsi_filters(vsi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009457
9458 i40e_vsi_delete(vsi);
9459 i40e_vsi_free_q_vectors(vsi);
Shannon Nelsona4866592014-02-11 08:24:07 +00009460 if (vsi->netdev) {
9461 free_netdev(vsi->netdev);
9462 vsi->netdev = NULL;
9463 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009464 i40e_vsi_clear_rings(vsi);
9465 i40e_vsi_clear(vsi);
9466
9467 /* If this was the last thing on the VEB, except for the
9468 * controlling VSI, remove the VEB, which puts the controlling
9469 * VSI onto the next level down in the switch.
9470 *
9471 * Well, okay, there's one more exception here: don't remove
9472 * the orphan VEBs yet. We'll wait for an explicit remove request
9473 * from up the network stack.
9474 */
Mitch Williams505682c2014-05-20 08:01:37 +00009475 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009476 if (pf->vsi[i] &&
9477 pf->vsi[i]->uplink_seid == uplink_seid &&
9478 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
9479 n++; /* count the VSIs */
9480 }
9481 }
9482 for (i = 0; i < I40E_MAX_VEB; i++) {
9483 if (!pf->veb[i])
9484 continue;
9485 if (pf->veb[i]->uplink_seid == uplink_seid)
9486 n++; /* count the VEBs */
9487 if (pf->veb[i]->seid == uplink_seid)
9488 veb = pf->veb[i];
9489 }
9490 if (n == 0 && veb && veb->uplink_seid != 0)
9491 i40e_veb_release(veb);
9492
9493 return 0;
9494}
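
/* Usage sketch (illustrative only, hypothetical helper): tearing down the
 * flow director VSI through i40e_vsi_release().  The real teardown path,
 * i40e_fdir_teardown(), releases that VSI in much the same way.
 */
static inline void i40e_example_release_fdir_vsi(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_release(pf->vsi[i]);
			break;
		}
	}
}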
9495
9496/**
9497 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
9498 * @vsi: ptr to the VSI
9499 *
9500 * This should only be called after i40e_vsi_mem_alloc() which allocates the
9501 * corresponding SW VSI structure and initializes num_queue_pairs for the
9502 * newly allocated VSI.
9503 *
9504 * Returns 0 on success or negative on failure
9505 **/
9506static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
9507{
9508 int ret = -ENOENT;
9509 struct i40e_pf *pf = vsi->back;
9510
Alexander Duyck493fb302013-09-28 07:01:44 +00009511 if (vsi->q_vectors[0]) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009512 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
9513 vsi->seid);
9514 return -EEXIST;
9515 }
9516
9517 if (vsi->base_vector) {
Jesse Brandeburgf29eaa32014-02-11 08:24:12 +00009518 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009519 vsi->seid, vsi->base_vector);
9520 return -EEXIST;
9521 }
9522
Greg Rose90e04072014-03-06 08:59:57 +00009523 ret = i40e_vsi_alloc_q_vectors(vsi);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009524 if (ret) {
9525 dev_info(&pf->pdev->dev,
9526 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
9527 vsi->num_q_vectors, vsi->seid, ret);
9528 vsi->num_q_vectors = 0;
9529 goto vector_setup_out;
9530 }
9531
Anjali Singhai Jain26cdc442015-07-10 19:36:00 -04009532 /* In Legacy mode, we do not have to get any other vector since we
9533 * piggyback on the misc/ICR0 for queue interrupts.
9534 */
9535 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
9536 return ret;
Shannon Nelson958a3e32013-09-28 07:13:28 +00009537 if (vsi->num_q_vectors)
9538 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
9539 vsi->num_q_vectors, vsi->idx);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009540 if (vsi->base_vector < 0) {
9541 dev_info(&pf->pdev->dev,
Shannon Nelson049a2be2014-10-17 03:14:50 +00009542 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
9543 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009544 i40e_vsi_free_q_vectors(vsi);
9545 ret = -ENOENT;
9546 goto vector_setup_out;
9547 }
9548
9549vector_setup_out:
9550 return ret;
9551}
9552
9553/**
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00009554 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
9555 * @vsi: pointer to the vsi.
9556 *
9557 * This re-allocates a vsi's queue resources.
9558 *
9559 * Returns pointer to the successfully allocated and configured VSI sw struct
9560 * on success, otherwise returns NULL on failure.
9561 **/
9562static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
9563{
John Underwoodf5340392016-02-18 09:19:24 -08009564 struct i40e_pf *pf;
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00009565 u8 enabled_tc;
9566 int ret;
9567
John Underwoodf5340392016-02-18 09:19:24 -08009568 if (!vsi)
9569 return NULL;
9570
9571 pf = vsi->back;
9572
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00009573 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
9574 i40e_vsi_clear_rings(vsi);
9575
9576 i40e_vsi_free_arrays(vsi, false);
9577 i40e_set_num_rings_in_vsi(vsi);
9578 ret = i40e_vsi_alloc_arrays(vsi, false);
9579 if (ret)
9580 goto err_vsi;
9581
9582 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
9583 if (ret < 0) {
Shannon Nelson049a2be2014-10-17 03:14:50 +00009584 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04009585 "failed to get tracking for %d queues for VSI %d err %d\n",
Shannon Nelson049a2be2014-10-17 03:14:50 +00009586 vsi->alloc_queue_pairs, vsi->seid, ret);
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +00009587 goto err_vsi;
9588 }
9589 vsi->base_queue = ret;
9590
9591 /* Update the FW view of the VSI. Force a reset of TC and queue
9592 * layout configurations.
9593 */
9594 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
9595 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
9596 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
9597 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
9598
9599 /* assign it some queues */
9600 ret = i40e_alloc_rings(vsi);
9601 if (ret)
9602 goto err_rings;
9603
9604 /* map all of the rings to the q_vectors */
9605 i40e_vsi_map_rings_to_vectors(vsi);
9606 return vsi;
9607
9608err_rings:
9609 i40e_vsi_free_q_vectors(vsi);
9610 if (vsi->netdev_registered) {
9611 vsi->netdev_registered = false;
9612 unregister_netdev(vsi->netdev);
9613 free_netdev(vsi->netdev);
9614 vsi->netdev = NULL;
9615 }
9616 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9617err_vsi:
9618 i40e_vsi_clear(vsi);
9619 return NULL;
9620}
9621
9622/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009623 * i40e_vsi_setup - Set up a VSI by a given type
9624 * @pf: board private structure
9625 * @type: VSI type
9626 * @uplink_seid: the switch element to link to
9627 * @param1: usage depends upon VSI type. For VF types, indicates VF id
9628 *
9629 * This allocates the sw VSI structure and its queue resources, then adds a VSI
9630 * to the identified VEB.
9631 *
9632 * Returns pointer to the successfully allocated and configured VSI sw struct on
9633 * success, otherwise returns NULL on failure.
9634 **/
9635struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
9636 u16 uplink_seid, u32 param1)
9637{
9638 struct i40e_vsi *vsi = NULL;
9639 struct i40e_veb *veb = NULL;
9640 int ret, i;
9641 int v_idx;
9642
9643 /* The requested uplink_seid must be either
9644 * - the PF's port seid
9645 * no VEB is needed because this is the PF
9646 * or this is a Flow Director special case VSI
9647 * - seid of an existing VEB
9648 * - seid of a VSI that owns an existing VEB
9649 * - seid of a VSI that doesn't own a VEB
9650 * a new VEB is created and the VSI becomes the owner
9651 * - seid of the PF VSI, which is what creates the first VEB
9652 * this is a special case of the previous
9653 *
9654 * Find which uplink_seid we were given and create a new VEB if needed
9655 */
9656 for (i = 0; i < I40E_MAX_VEB; i++) {
9657 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
9658 veb = pf->veb[i];
9659 break;
9660 }
9661 }
9662
9663 if (!veb && uplink_seid != pf->mac_seid) {
9664
Mitch Williams505682c2014-05-20 08:01:37 +00009665 for (i = 0; i < pf->num_alloc_vsi; i++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009666 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
9667 vsi = pf->vsi[i];
9668 break;
9669 }
9670 }
9671 if (!vsi) {
9672 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
9673 uplink_seid);
9674 return NULL;
9675 }
9676
9677 if (vsi->uplink_seid == pf->mac_seid)
9678 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
9679 vsi->tc_config.enabled_tc);
9680 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
9681 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
9682 vsi->tc_config.enabled_tc);
Anjali Singhai Jain79c21a82014-11-13 03:06:14 +00009683 if (veb) {
9684 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
9685 dev_info(&vsi->back->pdev->dev,
Shannon Nelsonfb43201f2015-08-26 15:14:17 -04009686 "New VSI creation error, uplink seid of LAN VSI expected.\n");
Anjali Singhai Jain79c21a82014-11-13 03:06:14 +00009687 return NULL;
9688 }
Anjali Singhai Jainfa11cb32015-05-27 12:06:14 -04009689 /* We come up by default in VEPA mode if SRIOV is not
9690 * already enabled, in which case we can't force VEPA
9691 * mode.
9692 */
9693 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
9694 veb->bridge_mode = BRIDGE_MODE_VEPA;
9695 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
9696 }
Neerav Parikh51616012015-02-06 08:52:14 +00009697 i40e_config_bridge_mode(veb);
Anjali Singhai Jain79c21a82014-11-13 03:06:14 +00009698 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009699 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9700 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9701 veb = pf->veb[i];
9702 }
9703 if (!veb) {
9704 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
9705 return NULL;
9706 }
9707
9708 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
9709 uplink_seid = veb->seid;
9710 }
9711
9712 /* get vsi sw struct */
9713 v_idx = i40e_vsi_mem_alloc(pf, type);
9714 if (v_idx < 0)
9715 goto err_alloc;
9716 vsi = pf->vsi[v_idx];
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08009717 if (!vsi)
9718 goto err_alloc;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009719 vsi->type = type;
9720 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
9721
9722 if (type == I40E_VSI_MAIN)
9723 pf->lan_vsi = v_idx;
9724 else if (type == I40E_VSI_SRIOV)
9725 vsi->vf_id = param1;
9726 /* assign it some queues */
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -08009727 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
9728 vsi->idx);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009729 if (ret < 0) {
Shannon Nelson049a2be2014-10-17 03:14:50 +00009730 dev_info(&pf->pdev->dev,
9731 "failed to get tracking for %d queues for VSI %d err=%d\n",
9732 vsi->alloc_queue_pairs, vsi->seid, ret);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009733 goto err_vsi;
9734 }
9735 vsi->base_queue = ret;
9736
9737 /* get a VSI from the hardware */
9738 vsi->uplink_seid = uplink_seid;
9739 ret = i40e_add_vsi(vsi);
9740 if (ret)
9741 goto err_vsi;
9742
9743 switch (vsi->type) {
9744 /* setup the netdev if needed */
9745 case I40E_VSI_MAIN:
Sowmini Varadhanb499ffb2015-12-07 15:06:34 -05009746 /* Apply relevant filters if a platform-specific mac
9747 * address was selected.
9748 */
9749 if (!!(pf->flags & I40E_FLAG_PF_MAC)) {
9750 ret = i40e_macaddr_init(vsi, pf->hw.mac.addr);
9751 if (ret) {
9752 dev_warn(&pf->pdev->dev,
9753 "could not set up macaddr; err %d\n",
9754 ret);
9755 }
9756 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009757 case I40E_VSI_VMDQ2:
Vasu Dev38e00432014-08-01 13:27:03 -07009758 case I40E_VSI_FCOE:
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009759 ret = i40e_config_netdev(vsi);
9760 if (ret)
9761 goto err_netdev;
9762 ret = register_netdev(vsi->netdev);
9763 if (ret)
9764 goto err_netdev;
9765 vsi->netdev_registered = true;
9766 netif_carrier_off(vsi->netdev);
Neerav Parikh4e3b35b2014-01-17 15:36:37 -08009767#ifdef CONFIG_I40E_DCB
9768 /* Setup DCB netlink interface */
9769 i40e_dcbnl_setup(vsi);
9770#endif /* CONFIG_I40E_DCB */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009771 /* fall through */
9772
9773 case I40E_VSI_FDIR:
9774 /* set up vectors and rings if needed */
9775 ret = i40e_vsi_setup_vectors(vsi);
9776 if (ret)
9777 goto err_msix;
9778
9779 ret = i40e_alloc_rings(vsi);
9780 if (ret)
9781 goto err_rings;
9782
9783 /* map all of the rings to the q_vectors */
9784 i40e_vsi_map_rings_to_vectors(vsi);
9785
9786 i40e_vsi_reset_stats(vsi);
9787 break;
9788
9789 default:
9790 /* no netdev or rings for the other VSI types */
9791 break;
9792 }
9793
Anjali Singhai Jaine25d00b82015-06-23 19:00:04 -04009794 if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
9795 (vsi->type == I40E_VSI_VMDQ2)) {
9796 ret = i40e_vsi_config_rss(vsi);
9797 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009798 return vsi;
9799
9800err_rings:
9801 i40e_vsi_free_q_vectors(vsi);
9802err_msix:
9803 if (vsi->netdev_registered) {
9804 vsi->netdev_registered = false;
9805 unregister_netdev(vsi->netdev);
9806 free_netdev(vsi->netdev);
9807 vsi->netdev = NULL;
9808 }
9809err_netdev:
9810 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9811err_vsi:
9812 i40e_vsi_clear(vsi);
9813err_alloc:
9814 return NULL;
9815}
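
/* Usage sketch (illustrative, hypothetical wrapper): creating a VF VSI
 * below the main LAN VSI.  Passing the LAN VSI's own SEID as uplink_seid
 * lets i40e_vsi_setup() stand up a VEB on demand, and param1 carries the
 * VF id for SRIOV VSIs.
 */
static inline struct i40e_vsi *i40e_example_add_vf_vsi(struct i40e_pf *pf,
						       u16 vf_id)
{
	return i40e_vsi_setup(pf, I40E_VSI_SRIOV,
			      pf->vsi[pf->lan_vsi]->seid, vf_id);
}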
9816
9817/**
9818 * i40e_veb_get_bw_info - Query VEB BW information
9819 * @veb: the veb to query
9820 *
9821 * Query the Tx scheduler BW configuration data for the given VEB
9822 **/
9823static int i40e_veb_get_bw_info(struct i40e_veb *veb)
9824{
9825 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
9826 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
9827 struct i40e_pf *pf = veb->pf;
9828 struct i40e_hw *hw = &pf->hw;
9829 u32 tc_bw_max;
9830 int ret = 0;
9831 int i;
9832
9833 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
9834 &bw_data, NULL);
9835 if (ret) {
9836 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04009837 "query veb bw config failed, err %s aq_err %s\n",
9838 i40e_stat_str(&pf->hw, ret),
9839 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009840 goto out;
9841 }
9842
9843 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
9844 &ets_data, NULL);
9845 if (ret) {
9846 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -04009847 "query veb bw ets config failed, err %s aq_err %s\n",
9848 i40e_stat_str(&pf->hw, ret),
9849 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009850 goto out;
9851 }
9852
9853 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
9854 veb->bw_max_quanta = ets_data.tc_bw_max;
9855 veb->is_abs_credits = bw_data.absolute_credits_enable;
Neerav Parikh23cd1f02014-11-12 00:18:41 +00009856 veb->enabled_tc = ets_data.tc_valid_bits;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009857 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
9858 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
9859 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9860 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
9861 veb->bw_tc_limit_credits[i] =
9862 le16_to_cpu(bw_data.tc_bw_limits[i]);
9863 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
9864 }
9865
9866out:
9867 return ret;
9868}
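
/* Illustrative sketch only (hypothetical helper): how the per-TC max
 * quanta are unpacked from the 32-bit value assembled out of the two
 * little-endian words returned by firmware, mirroring the loop in
 * i40e_veb_get_bw_info() above.
 */
static inline u8 i40e_example_tc_max_quanta(u32 tc_bw_max, int tc)
{
	/* each TC owns a 4-bit field; only the low 3 bits are used */
	return (tc_bw_max >> (tc * 4)) & 0x7;
}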
9869
9870/**
9871 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
9872 * @pf: board private structure
9873 *
9874 * On error: returns error code (negative)
9875 * On success: returns veb index in PF (positive)
9876 **/
9877static int i40e_veb_mem_alloc(struct i40e_pf *pf)
9878{
9879 int ret = -ENOENT;
9880 struct i40e_veb *veb;
9881 int i;
9882
9883 /* Need to protect the allocation of switch elements at the PF level */
9884 mutex_lock(&pf->switch_mutex);
9885
9886 /* VEB list may be fragmented if VEB creation/destruction has
9887 * been happening. We can afford to do a quick scan to look
9888 * for any free slots in the list.
9889 *
9890 * find next empty veb slot, looping back around if necessary
9891 */
9892 i = 0;
9893 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
9894 i++;
9895 if (i >= I40E_MAX_VEB) {
9896 ret = -ENOMEM;
9897 goto err_alloc_veb; /* out of VEB slots! */
9898 }
9899
9900 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
9901 if (!veb) {
9902 ret = -ENOMEM;
9903 goto err_alloc_veb;
9904 }
9905 veb->pf = pf;
9906 veb->idx = i;
9907 veb->enabled_tc = 1;
9908
9909 pf->veb[i] = veb;
9910 ret = i;
9911err_alloc_veb:
9912 mutex_unlock(&pf->switch_mutex);
9913 return ret;
9914}
9915
9916/**
9917 * i40e_switch_branch_release - Delete a branch of the switch tree
9918 * @branch: where to start deleting
9919 *
9920 * This uses recursion to find the tips of the branch to be
9921 * removed, deleting until we get back to and can delete this VEB.
9922 **/
9923static void i40e_switch_branch_release(struct i40e_veb *branch)
9924{
9925 struct i40e_pf *pf = branch->pf;
9926 u16 branch_seid = branch->seid;
9927 u16 veb_idx = branch->idx;
9928 int i;
9929
9930 /* release any VEBs on this VEB - RECURSION */
9931 for (i = 0; i < I40E_MAX_VEB; i++) {
9932 if (!pf->veb[i])
9933 continue;
9934 if (pf->veb[i]->uplink_seid == branch->seid)
9935 i40e_switch_branch_release(pf->veb[i]);
9936 }
9937
9938 /* Release the VSIs on this VEB, but not the owner VSI.
9939 *
9940 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
9941 * the VEB itself, so don't use (*branch) after this loop.
9942 */
Mitch Williams505682c2014-05-20 08:01:37 +00009943 for (i = 0; i < pf->num_alloc_vsi; i++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009944 if (!pf->vsi[i])
9945 continue;
9946 if (pf->vsi[i]->uplink_seid == branch_seid &&
9947 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
9948 i40e_vsi_release(pf->vsi[i]);
9949 }
9950 }
9951
9952 /* There's one corner case where the VEB might not have been
9953 * removed, so double check it here and remove it if needed.
9954 * This case happens if the veb was created from the debugfs
9955 * commands and no VSIs were added to it.
9956 */
9957 if (pf->veb[veb_idx])
9958 i40e_veb_release(pf->veb[veb_idx]);
9959}
9960
9961/**
9962 * i40e_veb_clear - remove veb struct
9963 * @veb: the veb to remove
9964 **/
9965static void i40e_veb_clear(struct i40e_veb *veb)
9966{
9967 if (!veb)
9968 return;
9969
9970 if (veb->pf) {
9971 struct i40e_pf *pf = veb->pf;
9972
9973 mutex_lock(&pf->switch_mutex);
9974 if (pf->veb[veb->idx] == veb)
9975 pf->veb[veb->idx] = NULL;
9976 mutex_unlock(&pf->switch_mutex);
9977 }
9978
9979 kfree(veb);
9980}
9981
9982/**
9983 * i40e_veb_release - Delete a VEB and free its resources
9984 * @veb: the VEB being removed
9985 **/
9986void i40e_veb_release(struct i40e_veb *veb)
9987{
9988 struct i40e_vsi *vsi = NULL;
9989 struct i40e_pf *pf;
9990 int i, n = 0;
9991
9992 pf = veb->pf;
9993
9994 /* find the remaining VSI and check for extras */
Mitch Williams505682c2014-05-20 08:01:37 +00009995 for (i = 0; i < pf->num_alloc_vsi; i++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00009996 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
9997 n++;
9998 vsi = pf->vsi[i];
9999 }
10000 }
10001 if (n != 1) {
10002 dev_info(&pf->pdev->dev,
10003 "can't remove VEB %d with %d VSIs left\n",
10004 veb->seid, n);
10005 return;
10006 }
10007
10008 /* move the remaining VSI to uplink veb */
10009 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
10010 if (veb->uplink_seid) {
10011 vsi->uplink_seid = veb->uplink_seid;
10012 if (veb->uplink_seid == pf->mac_seid)
10013 vsi->veb_idx = I40E_NO_VEB;
10014 else
10015 vsi->veb_idx = veb->veb_idx;
10016 } else {
10017 /* floating VEB */
10018 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10019 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
10020 }
10021
10022 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10023 i40e_veb_clear(veb);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010024}
10025
10026/**
10027 * i40e_add_veb - create the VEB in the switch
10028 * @veb: the VEB to be instantiated
10029 * @vsi: the controlling VSI
10030 **/
10031static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
10032{
Shannon Nelsonf1c7e722015-06-04 16:24:01 -040010033 struct i40e_pf *pf = veb->pf;
Shannon Nelson66fc3602016-01-13 16:51:42 -080010034 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010035 int ret;
10036
Shannon Nelsonf1c7e722015-06-04 16:24:01 -040010037 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
Mitch Williams5bc16032016-05-16 10:26:43 -070010038 veb->enabled_tc, false,
Shannon Nelson66fc3602016-01-13 16:51:42 -080010039 &veb->seid, enable_stats, NULL);
Mitch Williams5bc16032016-05-16 10:26:43 -070010040
10041 /* get a VEB from the hardware */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010042 if (ret) {
Shannon Nelsonf1c7e722015-06-04 16:24:01 -040010043 dev_info(&pf->pdev->dev,
10044 "couldn't add VEB, err %s aq_err %s\n",
10045 i40e_stat_str(&pf->hw, ret),
10046 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010047 return -EPERM;
10048 }
10049
10050 /* get statistics counter */
Shannon Nelsonf1c7e722015-06-04 16:24:01 -040010051 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010052 &veb->stats_idx, NULL, NULL, NULL);
10053 if (ret) {
Shannon Nelsonf1c7e722015-06-04 16:24:01 -040010054 dev_info(&pf->pdev->dev,
10055 "couldn't get VEB statistics idx, err %s aq_err %s\n",
10056 i40e_stat_str(&pf->hw, ret),
10057 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010058 return -EPERM;
10059 }
10060 ret = i40e_veb_get_bw_info(veb);
10061 if (ret) {
Shannon Nelsonf1c7e722015-06-04 16:24:01 -040010062 dev_info(&pf->pdev->dev,
10063 "couldn't get VEB bw info, err %s aq_err %s\n",
10064 i40e_stat_str(&pf->hw, ret),
10065 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10066 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010067 return -ENOENT;
10068 }
10069
10070 vsi->uplink_seid = veb->seid;
10071 vsi->veb_idx = veb->idx;
10072 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
10073
10074 return 0;
10075}
10076
10077/**
10078 * i40e_veb_setup - Set up a VEB
10079 * @pf: board private structure
10080 * @flags: VEB setup flags
10081 * @uplink_seid: the switch element to link to
10082 * @vsi_seid: the initial VSI seid
10083 * @enabled_tc: Enabled TC bit-map
10084 *
10085 * This allocates the sw VEB structure and links it into the switch.
10086 * It is possible and legal for this to be a duplicate of an already
10087 * existing VEB. It is also possible for both uplink and vsi seids
10088 * to be zero, in order to create a floating VEB.
10089 *
10090 * Returns pointer to the successfully allocated VEB sw struct on
10091 * success, otherwise returns NULL on failure.
10092 **/
10093struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
10094 u16 uplink_seid, u16 vsi_seid,
10095 u8 enabled_tc)
10096{
10097 struct i40e_veb *veb, *uplink_veb = NULL;
10098 int vsi_idx, veb_idx;
10099 int ret;
10100
10101 /* if one seid is 0, the other must be 0 to create a floating relay */
10102 if ((uplink_seid == 0 || vsi_seid == 0) &&
10103 (uplink_seid + vsi_seid != 0)) {
10104 dev_info(&pf->pdev->dev,
10105 "one, not both seid's are 0: uplink=%d vsi=%d\n",
10106 uplink_seid, vsi_seid);
10107 return NULL;
10108 }
10109
10110 /* make sure there is such a vsi and uplink */
Mitch Williams505682c2014-05-20 08:01:37 +000010111 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010112 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
10113 break;
Mitch Williams505682c2014-05-20 08:01:37 +000010114 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010115 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
10116 vsi_seid);
10117 return NULL;
10118 }
10119
10120 if (uplink_seid && uplink_seid != pf->mac_seid) {
10121 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10122 if (pf->veb[veb_idx] &&
10123 pf->veb[veb_idx]->seid == uplink_seid) {
10124 uplink_veb = pf->veb[veb_idx];
10125 break;
10126 }
10127 }
10128 if (!uplink_veb) {
10129 dev_info(&pf->pdev->dev,
10130 "uplink seid %d not found\n", uplink_seid);
10131 return NULL;
10132 }
10133 }
10134
10135 /* get veb sw struct */
10136 veb_idx = i40e_veb_mem_alloc(pf);
10137 if (veb_idx < 0)
10138 goto err_alloc;
10139 veb = pf->veb[veb_idx];
10140 veb->flags = flags;
10141 veb->uplink_seid = uplink_seid;
10142 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
10143 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
10144
10145 /* create the VEB in the switch */
10146 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
10147 if (ret)
10148 goto err_veb;
Shannon Nelson1bb8b932014-04-23 04:49:54 +000010149 if (vsi_idx == pf->lan_vsi)
10150 pf->lan_veb = veb->idx;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010151
10152 return veb;
10153
10154err_veb:
10155 i40e_veb_clear(veb);
10156err_alloc:
10157 return NULL;
10158}
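
/* Usage sketch (illustrative, hypothetical helper): as the comment above
 * notes, passing zero for both uplink_seid and vsi_seid creates a
 * floating VEB that is not yet attached to an uplink or a VSI.
 */
static inline struct i40e_veb *i40e_example_floating_veb(struct i40e_pf *pf)
{
	return i40e_veb_setup(pf, 0, 0, 0, 0);
}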
10159
10160/**
Jeff Kirsherb40c82e62015-02-27 09:18:34 +000010161 * i40e_setup_pf_switch_element - set PF vars based on switch type
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010162 * @pf: board private structure
10163 * @ele: element we are building info from
10164 * @num_reported: total number of elements
10165 * @printconfig: should we print the contents
10166 *
10167 * helper function to assist in extracting a few useful SEID values.
10168 **/
10169static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
10170 struct i40e_aqc_switch_config_element_resp *ele,
10171 u16 num_reported, bool printconfig)
10172{
10173 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
10174 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
10175 u8 element_type = ele->element_type;
10176 u16 seid = le16_to_cpu(ele->seid);
10177
10178 if (printconfig)
10179 dev_info(&pf->pdev->dev,
10180 "type=%d seid=%d uplink=%d downlink=%d\n",
10181 element_type, seid, uplink_seid, downlink_seid);
10182
10183 switch (element_type) {
10184 case I40E_SWITCH_ELEMENT_TYPE_MAC:
10185 pf->mac_seid = seid;
10186 break;
10187 case I40E_SWITCH_ELEMENT_TYPE_VEB:
10188 /* Main VEB? */
10189 if (uplink_seid != pf->mac_seid)
10190 break;
10191 if (pf->lan_veb == I40E_NO_VEB) {
10192 int v;
10193
10194 /* find existing or else empty VEB */
10195 for (v = 0; v < I40E_MAX_VEB; v++) {
10196 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
10197 pf->lan_veb = v;
10198 break;
10199 }
10200 }
10201 if (pf->lan_veb == I40E_NO_VEB) {
10202 v = i40e_veb_mem_alloc(pf);
10203 if (v < 0)
10204 break;
10205 pf->lan_veb = v;
10206 }
10207 }
10208
10209 pf->veb[pf->lan_veb]->seid = seid;
10210 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
10211 pf->veb[pf->lan_veb]->pf = pf;
10212 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
10213 break;
10214 case I40E_SWITCH_ELEMENT_TYPE_VSI:
10215 if (num_reported != 1)
10216 break;
10217 /* This is immediately after a reset so we can assume this is
10218 * the PF's VSI
10219 */
10220 pf->mac_seid = uplink_seid;
10221 pf->pf_seid = downlink_seid;
10222 pf->main_vsi_seid = seid;
10223 if (printconfig)
10224 dev_info(&pf->pdev->dev,
10225 "pf_seid=%d main_vsi_seid=%d\n",
10226 pf->pf_seid, pf->main_vsi_seid);
10227 break;
10228 case I40E_SWITCH_ELEMENT_TYPE_PF:
10229 case I40E_SWITCH_ELEMENT_TYPE_VF:
10230 case I40E_SWITCH_ELEMENT_TYPE_EMP:
10231 case I40E_SWITCH_ELEMENT_TYPE_BMC:
10232 case I40E_SWITCH_ELEMENT_TYPE_PE:
10233 case I40E_SWITCH_ELEMENT_TYPE_PA:
10234 /* ignore these for now */
10235 break;
10236 default:
10237 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
10238 element_type, seid);
10239 break;
10240 }
10241}
10242
10243/**
10244 * i40e_fetch_switch_configuration - Get switch config from firmware
10245 * @pf: board private structure
10246 * @printconfig: should we print the contents
10247 *
10248 * Get the current switch configuration from the device and
10249 * extract a few useful SEID values.
10250 **/
10251int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
10252{
10253 struct i40e_aqc_get_switch_config_resp *sw_config;
10254 u16 next_seid = 0;
10255 int ret = 0;
10256 u8 *aq_buf;
10257 int i;
10258
10259 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
10260 if (!aq_buf)
10261 return -ENOMEM;
10262
10263 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
10264 do {
10265 u16 num_reported, num_total;
10266
10267 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
10268 I40E_AQ_LARGE_BUF,
10269 &next_seid, NULL);
10270 if (ret) {
10271 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -040010272 "get switch config failed err %s aq_err %s\n",
10273 i40e_stat_str(&pf->hw, ret),
10274 i40e_aq_str(&pf->hw,
10275 pf->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010276 kfree(aq_buf);
10277 return -ENOENT;
10278 }
10279
10280 num_reported = le16_to_cpu(sw_config->header.num_reported);
10281 num_total = le16_to_cpu(sw_config->header.num_total);
10282
10283 if (printconfig)
10284 dev_info(&pf->pdev->dev,
10285 "header: %d reported %d total\n",
10286 num_reported, num_total);
10287
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010288 for (i = 0; i < num_reported; i++) {
10289 struct i40e_aqc_switch_config_element_resp *ele =
10290 &sw_config->element[i];
10291
10292 i40e_setup_pf_switch_element(pf, ele, num_reported,
10293 printconfig);
10294 }
10295 } while (next_seid != 0);
10296
10297 kfree(aq_buf);
10298 return ret;
10299}
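
/* Usage sketch (illustrative, hypothetical helper): re-reading the switch
 * configuration with printconfig=true dumps each reported element to the
 * kernel log, which can be handy when debugging switch layout issues.
 */
static inline void i40e_example_dump_switch_config(struct i40e_pf *pf)
{
	if (i40e_fetch_switch_configuration(pf, true))
		dev_info(&pf->pdev->dev, "switch config dump failed\n");
}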
10300
10301/**
10302 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
10303 * @pf: board private structure
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +000010304 * @reinit: if the Main VSI needs to be re-initialized.
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010305 *
10306 * Returns 0 on success, negative value on failure
10307 **/
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +000010308static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010309{
Anjali Singhai Jainb5569892016-05-03 15:13:12 -070010310 u16 flags = 0;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010311 int ret;
10312
10313 /* find out what's out there already */
10314 ret = i40e_fetch_switch_configuration(pf, false);
10315 if (ret) {
10316 dev_info(&pf->pdev->dev,
Shannon Nelsonf1c7e722015-06-04 16:24:01 -040010317 "couldn't fetch switch config, err %s aq_err %s\n",
10318 i40e_stat_str(&pf->hw, ret),
10319 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010320 return ret;
10321 }
10322 i40e_pf_reset_stats(pf);
10323
Anjali Singhai Jainb5569892016-05-03 15:13:12 -070010324 /* set the switch config bit for the whole device to
10325 * support limited promisc or true promisc
10326 * when user requests promisc. The default is limited
10327 * promisc.
10328 */
10329
10330 if ((pf->hw.pf_id == 0) &&
10331 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
10332 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
10333
10334 if (pf->hw.pf_id == 0) {
10335 u16 valid_flags;
10336
10337 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
10338 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
10339 NULL);
10340 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
10341 dev_info(&pf->pdev->dev,
10342 "couldn't set switch config bits, err %s aq_err %s\n",
10343 i40e_stat_str(&pf->hw, ret),
10344 i40e_aq_str(&pf->hw,
10345 pf->hw.aq.asq_last_status));
10346 /* not a fatal problem, just keep going */
10347 }
10348 }
10349
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010350 /* first time setup */
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +000010351 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010352 struct i40e_vsi *vsi = NULL;
10353 u16 uplink_seid;
10354
10355 /* Set up the PF VSI associated with the PF's main VSI
10356 * that is already in the HW switch
10357 */
10358 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
10359 uplink_seid = pf->veb[pf->lan_veb]->seid;
10360 else
10361 uplink_seid = pf->mac_seid;
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +000010362 if (pf->lan_vsi == I40E_NO_VSI)
10363 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
10364 else if (reinit)
10365 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010366 if (!vsi) {
10367 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
10368 i40e_fdir_teardown(pf);
10369 return -EAGAIN;
10370 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010371 } else {
10372 /* force a reset of TC and queue layout configurations */
10373 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
Jesse Brandeburg6995b362015-08-28 17:55:54 -040010374
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010375 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
10376 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
10377 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
10378 }
10379 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
10380
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -080010381 i40e_fdir_sb_setup(pf);
10382
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010383 /* Setup static PF queue filter control settings */
10384 ret = i40e_setup_pf_filter_control(pf);
10385 if (ret) {
10386 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
10387 ret);
10388 /* Failure here should not stop continuing other steps */
10389 }
10390
10391 /* enable RSS in the HW, even for only one queue, as the stack can use
10392 * the hash
10393 */
10394 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
Helin Zhang043dd652015-10-21 19:56:23 -040010395 i40e_pf_config_rss(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010396
10397 /* fill in link information and enable LSE reporting */
Catherine Sullivan0a862b42015-08-31 19:54:53 -040010398 i40e_update_link_info(&pf->hw);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010399 i40e_link_event(pf);
10400
Jesse Brandeburgd52c20b2013-11-26 10:49:15 +000010401 /* Initialize user-specific link properties */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010402 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
10403 I40E_AQ_AN_COMPLETED) ? true : false);
Jesse Brandeburgd52c20b2013-11-26 10:49:15 +000010404
Jacob Kellerbeb0dff2014-01-11 05:43:19 +000010405 i40e_ptp_init(pf);
10406
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010407 return ret;
10408}
10409
10410/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010411 * i40e_determine_queue_usage - Work out queue distribution
10412 * @pf: board private structure
10413 **/
10414static void i40e_determine_queue_usage(struct i40e_pf *pf)
10415{
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010416 int queues_left;
10417
10418 pf->num_lan_qps = 0;
Vasu Dev38e00432014-08-01 13:27:03 -070010419#ifdef I40E_FCOE
10420 pf->num_fcoe_qps = 0;
10421#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010422
10423 /* Find the max queues to be put into basic use. We'll always be
10424 * using TC0, whether or not DCB is running, and TC0 will get the
10425 * big RSS set.
10426 */
10427 queues_left = pf->hw.func_caps.num_tx_qp;
10428
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -080010429 if ((queues_left == 1) ||
Frank Zhang9aa7e932014-05-20 08:01:42 +000010430 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010431 /* one qp for PF, no queues for anything else */
10432 queues_left = 0;
Helin Zhangacd65442015-10-26 19:44:28 -040010433 pf->alloc_rss_size = pf->num_lan_qps = 1;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010434
10435 /* make sure all the fancies are disabled */
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -080010436 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -060010437 I40E_FLAG_IWARP_ENABLED |
Vasu Dev38e00432014-08-01 13:27:03 -070010438#ifdef I40E_FCOE
10439 I40E_FLAG_FCOE_ENABLED |
10440#endif
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -080010441 I40E_FLAG_FD_SB_ENABLED |
10442 I40E_FLAG_FD_ATR_ENABLED |
Neerav Parikh4d9b6042014-05-22 06:31:51 +000010443 I40E_FLAG_DCB_CAPABLE |
Dave Ertmana0362442016-08-29 17:38:26 -070010444 I40E_FLAG_DCB_ENABLED |
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -080010445 I40E_FLAG_SRIOV_ENABLED |
10446 I40E_FLAG_VMDQ_ENABLED);
Frank Zhang9aa7e932014-05-20 08:01:42 +000010447 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
10448 I40E_FLAG_FD_SB_ENABLED |
Anjali Singhai Jainbbe7d0e2014-05-20 08:01:44 +000010449 I40E_FLAG_FD_ATR_ENABLED |
Neerav Parikh4d9b6042014-05-22 06:31:51 +000010450 I40E_FLAG_DCB_CAPABLE))) {
Frank Zhang9aa7e932014-05-20 08:01:42 +000010451 /* one qp for PF */
Helin Zhangacd65442015-10-26 19:44:28 -040010452 pf->alloc_rss_size = pf->num_lan_qps = 1;
Frank Zhang9aa7e932014-05-20 08:01:42 +000010453 queues_left -= pf->num_lan_qps;
10454
10455 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -060010456 I40E_FLAG_IWARP_ENABLED |
Vasu Dev38e00432014-08-01 13:27:03 -070010457#ifdef I40E_FCOE
10458 I40E_FLAG_FCOE_ENABLED |
10459#endif
Frank Zhang9aa7e932014-05-20 08:01:42 +000010460 I40E_FLAG_FD_SB_ENABLED |
10461 I40E_FLAG_FD_ATR_ENABLED |
10462 I40E_FLAG_DCB_ENABLED |
10463 I40E_FLAG_VMDQ_ENABLED);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010464 } else {
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -080010465 /* Not enough queues for all TCs */
Neerav Parikh4d9b6042014-05-22 06:31:51 +000010466 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -080010467 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
Dave Ertmana0362442016-08-29 17:38:26 -070010468 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
10469 I40E_FLAG_DCB_ENABLED);
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -080010470 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
10471 }
Anjali Singhai Jain9a3bd2f2015-02-24 06:58:44 +000010472 pf->num_lan_qps = max_t(int, pf->rss_size_max,
10473 num_online_cpus());
10474 pf->num_lan_qps = min_t(int, pf->num_lan_qps,
10475 pf->hw.func_caps.num_tx_qp);
10476
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -080010477 queues_left -= pf->num_lan_qps;
10478 }
10479
Vasu Dev38e00432014-08-01 13:27:03 -070010480#ifdef I40E_FCOE
10481 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
10482 if (I40E_DEFAULT_FCOE <= queues_left) {
10483 pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
10484 } else if (I40E_MINIMUM_FCOE <= queues_left) {
10485 pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
10486 } else {
10487 pf->num_fcoe_qps = 0;
10488 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
10489 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
10490 }
10491
10492 queues_left -= pf->num_fcoe_qps;
10493 }
10494
10495#endif
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -080010496 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10497 if (queues_left > 1) {
10498 queues_left -= 1; /* save 1 queue for FD */
10499 } else {
10500 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10501 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
10502 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010503 }
10504
10505 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10506 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
Anjali Singhai Jaincbf61322014-01-17 15:36:35 -080010507 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
10508 (queues_left / pf->num_vf_qps));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010509 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
10510 }
10511
10512 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
10513 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
10514 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
10515 (queues_left / pf->num_vmdq_qps));
10516 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
10517 }
10518
Anjali Singhai Jainf8ff1462013-11-26 10:49:19 +000010519 pf->queues_left = queues_left;
Neerav Parikh8279e492015-09-03 17:18:50 -040010520 dev_dbg(&pf->pdev->dev,
10521 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
10522 pf->hw.func_caps.num_tx_qp,
10523 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
Helin Zhangacd65442015-10-26 19:44:28 -040010524 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
10525 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
10526 queues_left);
Vasu Dev38e00432014-08-01 13:27:03 -070010527#ifdef I40E_FCOE
Neerav Parikh8279e492015-09-03 17:18:50 -040010528 dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
Vasu Dev38e00432014-08-01 13:27:03 -070010529#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010530}
10531
10532/**
10533 * i40e_setup_pf_filter_control - Setup PF static filter control
10534 * @pf: PF to be setup
10535 *
Jeff Kirsherb40c82e62015-02-27 09:18:34 +000010536 * i40e_setup_pf_filter_control sets up a PF's initial filter control
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010537 * settings. If PE/FCoE are enabled then it will also set the per-PF
10538 * filter sizes required for them. It also enables the Flow Director,
10539 * ethertype and macvlan type filter settings for the PF.
10540 *
10541 * Returns 0 on success, negative on failure
10542 **/
10543static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
10544{
10545 struct i40e_filter_control_settings *settings = &pf->filter_settings;
10546
10547 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
10548
10549 /* Flow Director is enabled */
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -080010550 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010551 settings->enable_fdir = true;
10552
10553 /* Ethtype and MACVLAN filters enabled for PF */
10554 settings->enable_ethtype = true;
10555 settings->enable_macvlan = true;
10556
10557 if (i40e_set_filter_control(&pf->hw, settings))
10558 return -ENOENT;
10559
10560 return 0;
10561}
10562
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010563#define INFO_STRING_LEN 255
Shannon Nelson7fd89542015-10-21 19:47:04 -040010564#define REMAIN(__x) (INFO_STRING_LEN - (__x))
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010565static void i40e_print_features(struct i40e_pf *pf)
10566{
10567 struct i40e_hw *hw = &pf->hw;
Joe Perches3b195842015-12-03 04:20:57 -080010568 char *buf;
10569 int i;
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010570
Joe Perches3b195842015-12-03 04:20:57 -080010571 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
10572 if (!buf)
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010573 return;
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010574
Joe Perches3b195842015-12-03 04:20:57 -080010575 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010576#ifdef CONFIG_PCI_IOV
Joe Perches3b195842015-12-03 04:20:57 -080010577 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010578#endif
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -070010579 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
Shannon Nelson7fd89542015-10-21 19:47:04 -040010580 pf->hw.func_caps.num_vsis,
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -070010581 pf->vsi[pf->lan_vsi]->num_queue_pairs);
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010582 if (pf->flags & I40E_FLAG_RSS_ENABLED)
Joe Perches3b195842015-12-03 04:20:57 -080010583 i += snprintf(&buf[i], REMAIN(i), " RSS");
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010584 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
Joe Perches3b195842015-12-03 04:20:57 -080010585 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
Akeem G Abodunrinc6423ff2014-05-10 04:49:08 +000010586 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
Joe Perches3b195842015-12-03 04:20:57 -080010587 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
10588 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
Akeem G Abodunrinc6423ff2014-05-10 04:49:08 +000010589 }
Neerav Parikh4d9b6042014-05-22 06:31:51 +000010590 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
Joe Perches3b195842015-12-03 04:20:57 -080010591 i += snprintf(&buf[i], REMAIN(i), " DCB");
Joe Perches3b195842015-12-03 04:20:57 -080010592 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
Singhai, Anjali6a899022015-12-14 12:21:18 -080010593 i += snprintf(&buf[i], REMAIN(i), " Geneve");
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010594 if (pf->flags & I40E_FLAG_PTP)
Joe Perches3b195842015-12-03 04:20:57 -080010595 i += snprintf(&buf[i], REMAIN(i), " PTP");
Vasu Dev38e00432014-08-01 13:27:03 -070010596#ifdef I40E_FCOE
10597 if (pf->flags & I40E_FLAG_FCOE_ENABLED)
Joe Perches3b195842015-12-03 04:20:57 -080010598 i += snprintf(&buf[i], REMAIN(i), " FCOE");
Vasu Dev38e00432014-08-01 13:27:03 -070010599#endif
Shannon Nelson6dec1012015-09-28 14:12:30 -040010600 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
Joe Perches3b195842015-12-03 04:20:57 -080010601 i += snprintf(&buf[i], REMAIN(i), " VEB");
Shannon Nelson6dec1012015-09-28 14:12:30 -040010602 else
Joe Perches3b195842015-12-03 04:20:57 -080010603 i += snprintf(&buf[i], REMAIN(i), " VEPA");
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010604
Joe Perches3b195842015-12-03 04:20:57 -080010605 dev_info(&pf->pdev->dev, "%s\n", buf);
10606 kfree(buf);
Shannon Nelson7fd89542015-10-21 19:47:04 -040010607 WARN_ON(i > INFO_STRING_LEN);
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000010608}
10609
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010610/**
Sowmini Varadhanb499ffb2015-12-07 15:06:34 -050010611 * i40e_get_platform_mac_addr - get platform-specific MAC address
10612 *
10613 * @pdev: PCI device information struct
10614 * @pf: board private structure
10615 *
10616 * Look up the MAC address in Open Firmware on systems that support it,
10617 * and use IDPROM on SPARC if no OF address is found. On return, the
10618 * I40E_FLAG_PF_MAC will be set in pf->flags if a platform-specific value
10619 * has been selected.
10620 **/
10621static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
10622{
Sowmini Varadhanb499ffb2015-12-07 15:06:34 -050010623 pf->flags &= ~I40E_FLAG_PF_MAC;
Sowmini Varadhanba942722016-01-12 19:32:31 -080010624 if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
Sowmini Varadhanb499ffb2015-12-07 15:06:34 -050010625 pf->flags |= I40E_FLAG_PF_MAC;
Sowmini Varadhanb499ffb2015-12-07 15:06:34 -050010626}
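
/* Usage sketch (illustrative, hypothetical helper): i40e_vsi_setup() above
 * applies the platform address via i40e_macaddr_init() only when
 * I40E_FLAG_PF_MAC ends up set; a caller that needs a MAC either way might
 * fall back to the NVM-programmed address (assuming i40e_get_mac_addr()
 * from the shared code is available, as it is elsewhere in this driver).
 */
static inline void i40e_example_pick_mac(struct pci_dev *pdev,
					 struct i40e_pf *pf)
{
	i40e_get_platform_mac_addr(pdev, pf);
	if (!(pf->flags & I40E_FLAG_PF_MAC))
		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}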
10627
10628/**
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010629 * i40e_probe - Device initialization routine
10630 * @pdev: PCI device information struct
10631 * @ent: entry in i40e_pci_tbl
10632 *
Jeff Kirsherb40c82e62015-02-27 09:18:34 +000010633 * i40e_probe initializes a PF identified by a pci_dev structure.
10634 * The OS initialization, configuring of the PF private structure,
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010635 * and a hardware reset occur.
10636 *
10637 * Returns 0 on success, negative on failure
10638 **/
10639static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10640{
Catherine Sullivane8278452015-02-06 08:52:08 +000010641 struct i40e_aq_get_phy_abilities_resp abilities;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010642 struct i40e_pf *pf;
10643 struct i40e_hw *hw;
Anjali Singhai Jain93cd7652013-11-20 10:03:01 +000010644 static u16 pfs_found;
Shannon Nelson1d5109d2015-08-26 15:14:08 -040010645 u16 wol_nvm_bits;
Catherine Sullivand4dfb812013-11-28 06:39:21 +000010646 u16 link_status;
Jean Sacren6f66a482015-09-19 05:08:45 -060010647 int err;
Anjali Singhai Jain4f2f017c2015-10-21 19:47:07 -040010648 u32 val;
Shannon Nelson8a9eb7d2014-03-14 07:32:28 +000010649 u32 i;
Helin Zhang58fc3262015-10-01 14:37:38 -040010650 u8 set_fc_aq_fail;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010651
10652 err = pci_enable_device_mem(pdev);
10653 if (err)
10654 return err;
10655
10656 /* set up for high or low dma */
Mitch Williams64942942014-02-11 08:26:33 +000010657 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Mitch Williams64942942014-02-11 08:26:33 +000010658 if (err) {
Jean Sacrene3e3bfd2014-03-25 04:30:27 +000010659 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10660 if (err) {
10661 dev_err(&pdev->dev,
10662 "DMA configuration failed: 0x%x\n", err);
10663 goto err_dma;
10664 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010665 }
10666
10667 /* set up pci connections */
Johannes Thumshirn56d766d2016-06-07 09:44:05 +020010668 err = pci_request_mem_regions(pdev, i40e_driver_name);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010669 if (err) {
10670 dev_info(&pdev->dev,
10671 "pci_request_selected_regions failed %d\n", err);
10672 goto err_pci_reg;
10673 }
10674
10675 pci_enable_pcie_error_reporting(pdev);
10676 pci_set_master(pdev);
10677
10678 /* Now that we have a PCI connection, we need to do the
10679 * low level device setup. This is primarily setting up
10680 * the Admin Queue structures and then querying for the
10681 * device's current profile information.
10682 */
10683 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
10684 if (!pf) {
10685 err = -ENOMEM;
10686 goto err_pf_alloc;
10687 }
10688 pf->next_vsi = 0;
10689 pf->pdev = pdev;
10690 set_bit(__I40E_DOWN, &pf->state);
10691
10692 hw = &pf->hw;
10693 hw->back = pf;
Anjali Singhai232f4702015-02-26 16:15:39 +000010694
Shannon Nelson2ac8b672015-07-23 16:54:37 -040010695 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
10696 I40E_MAX_CSR_SPACE);
Anjali Singhai232f4702015-02-26 16:15:39 +000010697
Shannon Nelson2ac8b672015-07-23 16:54:37 -040010698 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010699 if (!hw->hw_addr) {
10700 err = -EIO;
10701 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
10702 (unsigned int)pci_resource_start(pdev, 0),
Shannon Nelson2ac8b672015-07-23 16:54:37 -040010703 pf->ioremap_len, err);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010704 goto err_ioremap;
10705 }
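	/* cache the PCI identity and bus location for use by the shared code */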
10706 hw->vendor_id = pdev->vendor;
10707 hw->device_id = pdev->device;
10708 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
10709 hw->subsystem_vendor_id = pdev->subsystem_vendor;
10710 hw->subsystem_device_id = pdev->subsystem_device;
10711 hw->bus.device = PCI_SLOT(pdev->devfn);
10712 hw->bus.func = PCI_FUNC(pdev->devfn);
Anjali Singhai Jain93cd7652013-11-20 10:03:01 +000010713 pf->instance = pfs_found;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010714
Shannon Nelsonde03d2b2016-03-10 14:59:44 -080010715 /* set up the locks for the AQ, do this only once in probe
10716 * and destroy them only once in remove
10717 */
10718 mutex_init(&hw->aq.asq_mutex);
10719 mutex_init(&hw->aq.arq_mutex);
10720
Alexander Duyck5d4ca232016-09-30 08:21:46 -040010721 pf->msg_enable = netif_msg_init(debug,
10722 NETIF_MSG_DRV |
10723 NETIF_MSG_PROBE |
10724 NETIF_MSG_LINK);
10725 if (debug < -1)
10726 pf->hw.debug_mask = debug;
Shannon Nelson5b5faa42014-10-17 03:14:51 +000010727
Jesse Brandeburg7134f9c2013-11-26 08:56:05 +000010728 /* do a special CORER for clearing PXE mode once at init */
10729 if (hw->revision_id == 0 &&
10730 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
10731 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
10732 i40e_flush(hw);
10733 msleep(200);
10734 pf->corer_count++;
10735
10736 i40e_clear_pxe_mode(hw);
10737 }
10738
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010739 /* Reset here to make sure all is clean and to define PF 'n' */
Shannon Nelson838d41d2014-06-04 20:41:27 +000010740 i40e_clear_hw(hw);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010741 err = i40e_pf_reset(hw);
10742 if (err) {
10743 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
10744 goto err_pf_reset;
10745 }
10746 pf->pfr_count++;
10747
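	/* size the admin send and receive queues and their buffers */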
10748 hw->aq.num_arq_entries = I40E_AQ_LEN;
10749 hw->aq.num_asq_entries = I40E_AQ_LEN;
10750 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10751 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10752 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
Carolyn Wybornyb2008cb2014-11-11 20:05:26 +000010753
Carolyn Wybornyb294ac72014-12-11 07:06:39 +000010754 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
Carolyn Wybornyb2008cb2014-11-11 20:05:26 +000010755 "%s-%s:misc",
10756 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010757
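	/* identify the MAC and set up the device's shared code */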
10758 err = i40e_init_shared_code(hw);
10759 if (err) {
Anjali Singhai Jainb2a75c52015-04-27 14:57:20 -040010760 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
10761 err);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010762 goto err_pf_reset;
10763 }
10764
Jesse Brandeburgd52c20b2013-11-26 10:49:15 +000010765 /* set up a default setting for link flow control */
10766 pf->hw.fc.requested_mode = I40E_FC_NONE;
10767
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010768 err = i40e_init_adminq(hw);
Carolyn Wyborny2b2426a762015-10-26 19:44:35 -040010769 if (err) {
10770 if (err == I40E_ERR_FIRMWARE_API_VERSION)
10771 dev_info(&pdev->dev,
10772 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
10773 else
10774 dev_info(&pdev->dev,
10775 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
10776
10777 goto err_pf_reset;
10778 }
Carolyn Wybornyf0b44442015-08-31 19:54:49 -040010779
Shannon Nelson6dec1012015-09-28 14:12:30 -040010780 /* provide nvm, fw, api versions */
10781 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
10782 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
10783 hw->aq.api_maj_ver, hw->aq.api_min_ver,
10784 i40e_nvm_version_str(hw));
Carolyn Wybornyf0b44442015-08-31 19:54:49 -040010785
Catherine Sullivan7aa67612014-07-09 07:46:17 +000010786 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
10787 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
Shannon Nelson278b6f62014-06-04 01:41:03 +000010788 dev_info(&pdev->dev,
Catherine Sullivan7aa67612014-07-09 07:46:17 +000010789 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
10790 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
10791 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
Shannon Nelson278b6f62014-06-04 01:41:03 +000010792 dev_info(&pdev->dev,
Catherine Sullivan7aa67612014-07-09 07:46:17 +000010793 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
Shannon Nelson278b6f62014-06-04 01:41:03 +000010794
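	/* sanity-check the NVM contents before going any further */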
Shannon Nelson4eb3f762014-03-06 08:59:58 +000010795 i40e_verify_eeprom(pf);
10796
Jesse Brandeburg2c5fe332014-04-23 04:49:57 +000010797 /* Rev 0 hardware was never productized */
10798 if (hw->revision_id < 1)
10799 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
10800
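	/* make sure PXE mode is cleared, then discover the device capabilities */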
Shannon Nelson6ff4ef82013-12-21 05:44:49 +000010801 i40e_clear_pxe_mode(hw);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010802 err = i40e_get_capabilities(pf);
10803 if (err)
10804 goto err_adminq_setup;
10805
10806 err = i40e_sw_init(pf);
10807 if (err) {
10808 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
10809 goto err_sw_init;
10810 }
10811
10812 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10813 hw->func_caps.num_rx_qp,
10814 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
10815 if (err) {
10816 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
10817 goto err_init_lan_hmc;
10818 }
10819
10820 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10821 if (err) {
10822 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
10823 err = -ENOENT;
10824 goto err_configure_lan_hmc;
10825 }
10826
Neerav Parikhb686ece2014-12-14 01:55:11 +000010827 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
10828	 * Ignore the error return code because, if LLDP was already disabled
10829	 * via hardware settings, this call will fail.
10830 */
Neerav Parikhf1bbad32016-01-13 16:51:39 -080010831 if (pf->flags & I40E_FLAG_STOP_FW_LLDP) {
Neerav Parikhb686ece2014-12-14 01:55:11 +000010832 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
10833 i40e_aq_stop_lldp(hw, true, NULL);
10834 }
10835
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010836 i40e_get_mac_addr(hw, hw->mac.addr);
Sowmini Varadhanb499ffb2015-12-07 15:06:34 -050010837 /* allow a platform config to override the HW addr */
10838 i40e_get_platform_mac_addr(pdev, pf);
Jesse Brandeburgf62b5062013-11-28 06:39:27 +000010839 if (!is_valid_ether_addr(hw->mac.addr)) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010840 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
10841 err = -EIO;
10842 goto err_mac_addr;
10843 }
10844 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
Greg Rose9a173902014-05-22 06:32:02 +000010845 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
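	/* also read the port-level MAC address and note whether it is valid */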
Neerav Parikh1f224ad2014-02-12 01:45:31 +000010846 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
10847 if (is_valid_ether_addr(hw->mac.port_addr))
10848 pf->flags |= I40E_FLAG_PORT_ID_VALID;
Vasu Dev38e00432014-08-01 13:27:03 -070010849#ifdef I40E_FCOE
10850 err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
10851 if (err)
10852 dev_info(&pdev->dev,
10853 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
10854 if (!is_valid_ether_addr(hw->mac.san_addr)) {
10855 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
10856 hw->mac.san_addr);
10857 ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
10858 }
10859 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
10860#endif /* I40E_FCOE */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010861
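	/* stash the PF in the PCI device and save config space for later restore */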
10862 pci_set_drvdata(pdev, pf);
10863 pci_save_state(pdev);
Neerav Parikh4e3b35b2014-01-17 15:36:37 -080010864#ifdef CONFIG_I40E_DCB
10865 err = i40e_init_pf_dcb(pf);
10866 if (err) {
Shannon Nelsonaebfc812014-12-11 07:06:38 +000010867 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
David Ertmanc17ef432016-09-30 01:36:21 -070010868 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
Neerav Parikh014269f2014-04-01 07:11:48 +000010869 /* Continue without DCB enabled */
Neerav Parikh4e3b35b2014-01-17 15:36:37 -080010870 }
10871#endif /* CONFIG_I40E_DCB */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010872
10873 /* set up periodic task facility */
10874 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
10875 pf->service_timer_period = HZ;
10876
10877 INIT_WORK(&pf->service_task, i40e_service_task);
10878 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
10879 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010880
Shannon Nelson1d5109d2015-08-26 15:14:08 -040010881 /* NVM bit on means WoL disabled for the port */
10882 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
Jesse Brandeburg75f5cea2015-11-19 11:34:14 -080010883	if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
Shannon Nelson1d5109d2015-08-26 15:14:08 -040010884 pf->wol_en = false;
10885 else
10886 pf->wol_en = true;
Shannon Nelson8e2773a2013-11-28 06:39:22 +000010887 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
10888
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010889 /* set up the main switch operations */
10890 i40e_determine_queue_usage(pf);
Jesse Brandeburgc11472802015-04-07 19:45:39 -040010891 err = i40e_init_interrupt_scheme(pf);
10892 if (err)
10893 goto err_switch_setup;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010894
Mitch Williams505682c2014-05-20 08:01:37 +000010895 /* The number of VSIs reported by the FW is the minimum guaranteed
10896 * to us; HW supports far more and we share the remaining pool with
10897 * the other PFs. We allocate space for more than the guarantee with
10898 * the understanding that we might not get them all later.
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010899 */
Mitch Williams505682c2014-05-20 08:01:37 +000010900 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
10901 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
10902 else
10903 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
10904
10905 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
Jesse Brandeburgd17038d2015-12-23 12:05:55 -080010906 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
10907 GFP_KERNEL);
Wei Yongjuned87ac02013-09-24 05:17:25 +000010908 if (!pf->vsi) {
10909 err = -ENOMEM;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010910 goto err_switch_setup;
Wei Yongjuned87ac02013-09-24 05:17:25 +000010911 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010912
Anjali Singhai Jainfa11cb32015-05-27 12:06:14 -040010913#ifdef CONFIG_PCI_IOV
10914 /* prep for VF support */
10915 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10916 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10917 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
10918 if (pci_num_vf(pdev))
10919 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
10920 }
10921#endif
Anjali Singhai Jainbc7d3382013-11-26 10:49:18 +000010922 err = i40e_setup_pf_switch(pf, false);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010923 if (err) {
10924 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
10925 goto err_vsis;
10926 }
Helin Zhang58fc3262015-10-01 14:37:38 -040010927
10928 /* Make sure flow control is set according to current settings */
10929 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
10930 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
10931 dev_dbg(&pf->pdev->dev,
10932 "Set fc with err %s aq_err %s on get_phy_cap\n",
10933 i40e_stat_str(hw, err),
10934 i40e_aq_str(hw, hw->aq.asq_last_status));
10935 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
10936 dev_dbg(&pf->pdev->dev,
10937 "Set fc with err %s aq_err %s on set_phy_config\n",
10938 i40e_stat_str(hw, err),
10939 i40e_aq_str(hw, hw->aq.asq_last_status));
10940 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
10941 dev_dbg(&pf->pdev->dev,
10942 "Set fc with err %s aq_err %s on get_link_info\n",
10943 i40e_stat_str(hw, err),
10944 i40e_aq_str(hw, hw->aq.asq_last_status));
10945
Shannon Nelson8a9eb7d2014-03-14 07:32:28 +000010946 /* if FDIR VSI was set up, start it now */
Mitch Williams505682c2014-05-20 08:01:37 +000010947 for (i = 0; i < pf->num_alloc_vsi; i++) {
Shannon Nelson8a9eb7d2014-03-14 07:32:28 +000010948 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
10949 i40e_vsi_open(pf->vsi[i]);
10950 break;
10951 }
10952 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010953
Shannon Nelson2f0aff42016-01-04 10:33:08 -080010954 /* The driver only wants link up/down and module qualification
10955 * reports from firmware. Note the negative logic.
Jesse Brandeburg7e2453f2014-09-13 07:40:41 +000010956 */
10957 err = i40e_aq_set_phy_int_mask(&pf->hw,
Shannon Nelson2f0aff42016-01-04 10:33:08 -080010958 ~(I40E_AQ_EVENT_LINK_UPDOWN |
Shannon Nelson867a79e2016-03-18 12:18:15 -070010959 I40E_AQ_EVENT_MEDIA_NA |
Shannon Nelson2f0aff42016-01-04 10:33:08 -080010960 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
Jesse Brandeburg7e2453f2014-09-13 07:40:41 +000010961 if (err)
Shannon Nelsonf1c7e722015-06-04 16:24:01 -040010962 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10963 i40e_stat_str(&pf->hw, err),
10964 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Jesse Brandeburg7e2453f2014-09-13 07:40:41 +000010965
Anjali Singhai Jain4f2f017c2015-10-21 19:47:07 -040010966	/* Reconfigure hardware to allow a smaller MSS when TSO is in use,
10967	 * so that we avoid the MDD being fired and causing a reset when a
10968	 * small MSS is combined with TSO.
10969 */
10970 val = rd32(hw, I40E_REG_MSS);
10971 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
10972 val &= ~I40E_REG_MSS_MIN_MASK;
10973 val |= I40E_64BYTE_MSS;
10974 wr32(hw, I40E_REG_MSS, val);
10975 }
10976
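	/* when I40E_FLAG_RESTART_AUTONEG is set, pause briefly and then restart link autonegotiation */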
Anjali Singhai Jain8eed76f2015-12-09 15:50:31 -080010977 if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
Anjali Singhai Jain025b4a52015-02-24 06:58:46 +000010978 msleep(75);
10979 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10980 if (err)
Shannon Nelsonf1c7e722015-06-04 16:24:01 -040010981 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10982 i40e_stat_str(&pf->hw, err),
10983 i40e_aq_str(&pf->hw,
10984 pf->hw.aq.asq_last_status));
Anjali Singhai Jaincafa2ee2014-09-13 07:40:45 +000010985 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000010986 /* The main driver is (mostly) up and happy. We need to set this state
10987 * before setting up the misc vector or we get a race and the vector
10988 * ends up disabled forever.
10989 */
10990 clear_bit(__I40E_DOWN, &pf->state);
10991
10992 /* In case of MSIX we are going to setup the misc vector right here
10993 * to handle admin queue events etc. In case of legacy and MSI
10994 * the misc functionality and queue processing is combined in
10995 * the same vector and that gets setup at open.
10996 */
10997 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
10998 err = i40e_setup_misc_vector(pf);
10999 if (err) {
11000 dev_info(&pdev->dev,
11001 "setup of misc vector failed: %d\n", err);
11002 goto err_vsis;
11003 }
11004 }
11005
Greg Rosedf805f62014-04-04 04:43:16 +000011006#ifdef CONFIG_PCI_IOV
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011007 /* prep for VF support */
11008 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
Shannon Nelson4eb3f762014-03-06 08:59:58 +000011009 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11010 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011011 /* disable link interrupts for VFs */
11012 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
11013 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
11014 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
11015 i40e_flush(hw);
Mitch Williams4aeec012014-02-13 03:48:47 -080011016
11017 if (pci_num_vf(pdev)) {
11018 dev_info(&pdev->dev,
11019 "Active VFs found, allocating resources.\n");
11020 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
11021 if (err)
11022 dev_info(&pdev->dev,
11023 "Error %d allocating resources for existing VFs\n",
11024 err);
11025 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011026 }
Greg Rosedf805f62014-04-04 04:43:16 +000011027#endif /* CONFIG_PCI_IOV */
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011028
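	/* reserve MSI-X vectors from the interrupt pile for the iWARP client */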
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -060011029 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11030 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
11031 pf->num_iwarp_msix,
11032 I40E_IWARP_IRQ_PILE_ID);
11033 if (pf->iwarp_base_vector < 0) {
11034 dev_info(&pdev->dev,
11035 "failed to get tracking for %d vectors for IWARP err=%d\n",
11036 pf->num_iwarp_msix, pf->iwarp_base_vector);
11037 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11038 }
11039 }
Anjali Singhai Jain93cd7652013-11-20 10:03:01 +000011040
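	/* create the debugfs entries for this PF */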
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011041 i40e_dbg_pf_init(pf);
11042
11043 /* tell the firmware that we're starting */
Jesse Brandeburg44033fa2014-04-23 04:50:15 +000011044 i40e_send_version(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011045
11046 /* since everything's happy, start the service_task timer */
11047 mod_timer(&pf->service_timer,
11048 round_jiffies(jiffies + pf->service_timer_period));
11049
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -060011050	/* add this PF to the client device list and launch a client service task */
11051 err = i40e_lan_add_device(pf);
11052 if (err)
11053 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
11054 err);
11055
Vasu Dev38e00432014-08-01 13:27:03 -070011056#ifdef I40E_FCOE
11057 /* create FCoE interface */
11058 i40e_fcoe_vsi_setup(pf);
11059
11060#endif
Anjali Singhai Jain3fced532015-09-03 17:18:59 -040011061#define PCI_SPEED_SIZE 8
11062#define PCI_WIDTH_SIZE 8
11063 /* Devices on the IOSF bus do not have this information
11064 * and will report PCI Gen 1 x 1 by default so don't bother
11065 * checking them.
11066 */
11067 if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
11068 char speed[PCI_SPEED_SIZE] = "Unknown";
11069 char width[PCI_WIDTH_SIZE] = "Unknown";
Catherine Sullivand4dfb812013-11-28 06:39:21 +000011070
Anjali Singhai Jain3fced532015-09-03 17:18:59 -040011071 /* Get the negotiated link width and speed from PCI config
11072 * space
11073 */
11074 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
11075 &link_status);
Catherine Sullivand4dfb812013-11-28 06:39:21 +000011076
Anjali Singhai Jain3fced532015-09-03 17:18:59 -040011077 i40e_set_pci_config_data(hw, link_status);
Catherine Sullivand4dfb812013-11-28 06:39:21 +000011078
Anjali Singhai Jain3fced532015-09-03 17:18:59 -040011079 switch (hw->bus.speed) {
11080 case i40e_bus_speed_8000:
11081 strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
11082 case i40e_bus_speed_5000:
11083 strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
11084 case i40e_bus_speed_2500:
11085 strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
11086 default:
11087 break;
11088 }
11089 switch (hw->bus.width) {
11090 case i40e_bus_width_pcie_x8:
11091 strncpy(width, "8", PCI_WIDTH_SIZE); break;
11092 case i40e_bus_width_pcie_x4:
11093 strncpy(width, "4", PCI_WIDTH_SIZE); break;
11094 case i40e_bus_width_pcie_x2:
11095 strncpy(width, "2", PCI_WIDTH_SIZE); break;
11096 case i40e_bus_width_pcie_x1:
11097 strncpy(width, "1", PCI_WIDTH_SIZE); break;
11098 default:
11099 break;
11100 }
11101
11102 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
11103 speed, width);
11104
11105 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
11106 hw->bus.speed < i40e_bus_speed_8000) {
11107 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
11108 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
11109 }
Catherine Sullivand4dfb812013-11-28 06:39:21 +000011110 }
11111
Catherine Sullivane8278452015-02-06 08:52:08 +000011112 /* get the requested speeds from the fw */
11113 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
11114 if (err)
Neerav Parikh8279e492015-09-03 17:18:50 -040011115 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
11116 i40e_stat_str(&pf->hw, err),
11117 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
Catherine Sullivane8278452015-02-06 08:52:08 +000011118 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
11119
Catherine Sullivanfc72dbc2015-09-01 11:36:30 -040011120 /* get the supported phy types from the fw */
11121 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
11122 if (err)
11123 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
11124 i40e_stat_str(&pf->hw, err),
11125 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11126 pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);
11127
Anjali Singhai Jaine7358f52015-10-01 14:37:34 -040011128	/* Add a filter to drop all flow control frames from any VSI so they
11129	 * are not transmitted. By doing so we stop a malicious VF from sending out
11130 * PAUSE or PFC frames and potentially controlling traffic for other
11131 * PF/VF VSIs.
11132 * The FW can still send Flow control frames if enabled.
11133 */
11134 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
11135 pf->main_vsi_seid);
11136
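	/* note adapters that have a 10GBASE-T PHY attached */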
Carolyn Wyborny31b606d2016-02-17 16:12:12 -080011137 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
11138 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
11139 pf->flags |= I40E_FLAG_HAVE_10GBASET_PHY;
11140
Jesse Brandeburg0c22b3d2014-02-11 08:24:14 +000011141 /* print a string summarizing features */
11142 i40e_print_features(pf);
11143
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011144 return 0;
11145
11146 /* Unwind what we've done if something failed in the setup */
11147err_vsis:
11148 set_bit(__I40E_DOWN, &pf->state);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011149 i40e_clear_interrupt_scheme(pf);
11150 kfree(pf->vsi);
Shannon Nelson04b03012013-11-28 06:39:34 +000011151err_switch_setup:
11152 i40e_reset_interrupt_capability(pf);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011153 del_timer_sync(&pf->service_timer);
11154err_mac_addr:
11155err_configure_lan_hmc:
11156 (void)i40e_shutdown_lan_hmc(hw);
11157err_init_lan_hmc:
11158 kfree(pf->qp_pile);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011159err_sw_init:
11160err_adminq_setup:
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011161err_pf_reset:
11162 iounmap(hw->hw_addr);
11163err_ioremap:
11164 kfree(pf);
11165err_pf_alloc:
11166 pci_disable_pcie_error_reporting(pdev);
Johannes Thumshirn56d766d2016-06-07 09:44:05 +020011167 pci_release_mem_regions(pdev);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011168err_pci_reg:
11169err_dma:
11170 pci_disable_device(pdev);
11171 return err;
11172}
11173
11174/**
11175 * i40e_remove - Device removal routine
11176 * @pdev: PCI device information struct
11177 *
11178 * i40e_remove is called by the PCI subsystem to alert the driver
11179 * that it should release a PCI device. This could be caused by a
11180 * Hot-Plug event, or because the driver is going to be removed from
11181 * memory.
11182 **/
11183static void i40e_remove(struct pci_dev *pdev)
11184{
11185 struct i40e_pf *pf = pci_get_drvdata(pdev);
Carolyn Wybornybcab2db2015-09-28 14:16:55 -040011186 struct i40e_hw *hw = &pf->hw;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011187 i40e_status ret_code;
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011188 int i;
11189
11190 i40e_dbg_pf_exit(pf);
11191
Jacob Kellerbeb0dff2014-01-11 05:43:19 +000011192 i40e_ptp_stop(pf);
11193
Carolyn Wybornybcab2db2015-09-28 14:16:55 -040011194 /* Disable RSS in hw */
Shannon Nelson272cdaf22016-02-17 16:12:21 -080011195 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
11196 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
Carolyn Wybornybcab2db2015-09-28 14:16:55 -040011197
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011198 /* no more scheduling of any task */
Pandi Kumar Maharajana4618ec2016-02-18 09:19:25 -080011199 set_bit(__I40E_SUSPENDED, &pf->state);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011200 set_bit(__I40E_DOWN, &pf->state);
Shannon Nelsonc99abb42016-03-10 14:59:45 -080011201 if (pf->service_timer.data)
11202 del_timer_sync(&pf->service_timer);
11203 if (pf->service_task.func)
11204 cancel_work_sync(&pf->service_task);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011205
Mitch Williamseb2d80b2014-02-13 03:48:48 -080011206 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
11207 i40e_free_vfs(pf);
11208 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
11209 }
11210
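	/* tear down the Flow Director resources before releasing the VSIs */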
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011211 i40e_fdir_teardown(pf);
11212
11213 /* If there is a switch structure or any orphans, remove them.
11214	 * This will leave only the PF's VSI.
11215 */
11216 for (i = 0; i < I40E_MAX_VEB; i++) {
11217 if (!pf->veb[i])
11218 continue;
11219
11220 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
11221 pf->veb[i]->uplink_seid == 0)
11222 i40e_switch_branch_release(pf->veb[i]);
11223 }
11224
11225	/* Now we can shut down the PF's VSI, just before we kill
11226 * adminq and hmc.
11227 */
11228 if (pf->vsi[pf->lan_vsi])
11229 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
11230
Anjali Singhai Jaine3219ce2016-01-20 13:40:01 -060011231 /* remove attached clients */
11232 ret_code = i40e_lan_del_device(pf);
11233 if (ret_code) {
11234 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
11235 ret_code);
11236 }
11237
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011238 /* shutdown and destroy the HMC */
Jesse Brandeburgf734dff2016-01-15 14:33:11 -080011239 if (hw->hmc.hmc_obj) {
11240 ret_code = i40e_shutdown_lan_hmc(hw);
Shannon Nelson60442de2014-04-23 04:50:13 +000011241 if (ret_code)
11242 dev_warn(&pdev->dev,
11243 "Failed to destroy the HMC resources: %d\n",
11244 ret_code);
11245 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011246
11247 /* shutdown the adminq */
Henry Tiemanac9c5c62016-09-06 18:05:11 -070011248 i40e_shutdown_adminq(hw);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011249
Jesse Brandeburg8ddb3322015-11-18 15:47:06 -080011250 /* destroy the locks only once, here */
11251 mutex_destroy(&hw->aq.arq_mutex);
11252 mutex_destroy(&hw->aq.asq_mutex);
11253
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011254 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
11255 i40e_clear_interrupt_scheme(pf);
Mitch Williams505682c2014-05-20 08:01:37 +000011256 for (i = 0; i < pf->num_alloc_vsi; i++) {
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011257 if (pf->vsi[i]) {
11258 i40e_vsi_clear_rings(pf->vsi[i]);
11259 i40e_vsi_clear(pf->vsi[i]);
11260 pf->vsi[i] = NULL;
11261 }
11262 }
11263
11264 for (i = 0; i < I40E_MAX_VEB; i++) {
11265 kfree(pf->veb[i]);
11266 pf->veb[i] = NULL;
11267 }
11268
11269 kfree(pf->qp_pile);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011270 kfree(pf->vsi);
11271
Jesse Brandeburgf734dff2016-01-15 14:33:11 -080011272 iounmap(hw->hw_addr);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011273 kfree(pf);
Johannes Thumshirn56d766d2016-06-07 09:44:05 +020011274 pci_release_mem_regions(pdev);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011275
11276 pci_disable_pcie_error_reporting(pdev);
11277 pci_disable_device(pdev);
11278}
11279
11280/**
11281 * i40e_pci_error_detected - warning that something funky happened in PCI land
11282 * @pdev: PCI device information struct
11283 *
11284 * Called to warn that something happened and the error handling steps
11285 * are in progress. Allows the driver to quiesce things and be ready for
11286 * remediation.
11287 **/
11288static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
11289 enum pci_channel_state error)
11290{
11291 struct i40e_pf *pf = pci_get_drvdata(pdev);
11292
11293 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
11294
Guilherme G Piccoliedfc23ee2016-10-03 00:31:12 -070011295 if (!pf) {
11296 dev_info(&pdev->dev,
11297 "Cannot recover - error happened during device probe\n");
11298 return PCI_ERS_RESULT_DISCONNECT;
11299 }
11300
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011301 /* shutdown all operations */
Shannon Nelson9007bcc2013-11-26 10:49:23 +000011302 if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
11303 rtnl_lock();
11304 i40e_prep_for_reset(pf);
11305 rtnl_unlock();
11306 }
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011307
11308 /* Request a slot reset */
11309 return PCI_ERS_RESULT_NEED_RESET;
11310}
11311
11312/**
11313 * i40e_pci_error_slot_reset - a PCI slot reset just happened
11314 * @pdev: PCI device information struct
11315 *
11316 * Called to find if the driver can work with the device now that
11317 * the pci slot has been reset. If a basic connection seems good
11318 * (registers are readable and have sane content) then return a
11319 * happy little PCI_ERS_RESULT_xxx.
11320 **/
11321static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
11322{
11323 struct i40e_pf *pf = pci_get_drvdata(pdev);
11324 pci_ers_result_t result;
11325 int err;
11326 u32 reg;
11327
Shannon Nelsonfb43201f2015-08-26 15:14:17 -040011328 dev_dbg(&pdev->dev, "%s\n", __func__);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011329 if (pci_enable_device_mem(pdev)) {
11330 dev_info(&pdev->dev,
11331 "Cannot re-enable PCI device after reset.\n");
11332 result = PCI_ERS_RESULT_DISCONNECT;
11333 } else {
11334 pci_set_master(pdev);
11335 pci_restore_state(pdev);
11336 pci_save_state(pdev);
11337 pci_wake_from_d3(pdev, false);
11338
11339 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
11340 if (reg == 0)
11341 result = PCI_ERS_RESULT_RECOVERED;
11342 else
11343 result = PCI_ERS_RESULT_DISCONNECT;
11344 }
11345
11346 err = pci_cleanup_aer_uncorrect_error_status(pdev);
11347 if (err) {
11348 dev_info(&pdev->dev,
11349 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
11350 err);
11351 /* non-fatal, continue */
11352 }
11353
11354 return result;
11355}
11356
11357/**
11358 * i40e_pci_error_resume - restart operations after PCI error recovery
11359 * @pdev: PCI device information struct
11360 *
11361 * Called to allow the driver to bring things back up after PCI error
11362 * and/or reset recovery has finished.
11363 **/
11364static void i40e_pci_error_resume(struct pci_dev *pdev)
11365{
11366 struct i40e_pf *pf = pci_get_drvdata(pdev);
11367
Shannon Nelsonfb43201f2015-08-26 15:14:17 -040011368 dev_dbg(&pdev->dev, "%s\n", __func__);
Shannon Nelson9007bcc2013-11-26 10:49:23 +000011369 if (test_bit(__I40E_SUSPENDED, &pf->state))
11370 return;
11371
11372 rtnl_lock();
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011373 i40e_handle_reset_warning(pf);
Vasily Averin4c4935a2015-07-08 15:04:26 +030011374 rtnl_unlock();
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011375}
11376
Shannon Nelson9007bcc2013-11-26 10:49:23 +000011377/**
11378 * i40e_shutdown - PCI callback for shutting down
11379 * @pdev: PCI device information struct
11380 **/
11381static void i40e_shutdown(struct pci_dev *pdev)
11382{
11383 struct i40e_pf *pf = pci_get_drvdata(pdev);
Shannon Nelson8e2773a2013-11-28 06:39:22 +000011384 struct i40e_hw *hw = &pf->hw;
Shannon Nelson9007bcc2013-11-26 10:49:23 +000011385
11386 set_bit(__I40E_SUSPENDED, &pf->state);
11387 set_bit(__I40E_DOWN, &pf->state);
11388 rtnl_lock();
11389 i40e_prep_for_reset(pf);
11390 rtnl_unlock();
11391
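	/* program the Wake-on-LAN registers according to the configured WoL setting */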
Shannon Nelson8e2773a2013-11-28 06:39:22 +000011392 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11393 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11394
Catherine Sullivan02b42492015-07-10 19:35:59 -040011395 del_timer_sync(&pf->service_timer);
11396 cancel_work_sync(&pf->service_task);
11397 i40e_fdir_teardown(pf);
11398
11399 rtnl_lock();
11400 i40e_prep_for_reset(pf);
11401 rtnl_unlock();
11402
11403 wr32(hw, I40E_PFPM_APM,
11404 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11405 wr32(hw, I40E_PFPM_WUFC,
11406 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11407
Shannon Nelsone1477582015-02-21 06:44:33 +000011408 i40e_clear_interrupt_scheme(pf);
11409
Shannon Nelson9007bcc2013-11-26 10:49:23 +000011410 if (system_state == SYSTEM_POWER_OFF) {
Shannon Nelson8e2773a2013-11-28 06:39:22 +000011411 pci_wake_from_d3(pdev, pf->wol_en);
Shannon Nelson9007bcc2013-11-26 10:49:23 +000011412 pci_set_power_state(pdev, PCI_D3hot);
11413 }
11414}
11415
11416#ifdef CONFIG_PM
11417/**
11418 * i40e_suspend - PCI callback for moving to D3
11419 * @pdev: PCI device information struct
11420 **/
11421static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
11422{
11423 struct i40e_pf *pf = pci_get_drvdata(pdev);
Shannon Nelson8e2773a2013-11-28 06:39:22 +000011424 struct i40e_hw *hw = &pf->hw;
Greg Rose059ff692016-05-16 10:26:38 -070011425 int retval = 0;
Shannon Nelson9007bcc2013-11-26 10:49:23 +000011426
11427 set_bit(__I40E_SUSPENDED, &pf->state);
11428 set_bit(__I40E_DOWN, &pf->state);
Mitch Williams3932dbf2015-03-31 00:45:04 -070011429
Shannon Nelson9007bcc2013-11-26 10:49:23 +000011430 rtnl_lock();
11431 i40e_prep_for_reset(pf);
11432 rtnl_unlock();
11433
Shannon Nelson8e2773a2013-11-28 06:39:22 +000011434 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11435 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11436
Greg Roseb33d3b72016-05-16 10:26:37 -070011437 i40e_stop_misc_vector(pf);
11438
Greg Rose059ff692016-05-16 10:26:38 -070011439 retval = pci_save_state(pdev);
11440 if (retval)
11441 return retval;
11442
Shannon Nelson8e2773a2013-11-28 06:39:22 +000011443 pci_wake_from_d3(pdev, pf->wol_en);
Shannon Nelson9007bcc2013-11-26 10:49:23 +000011444 pci_set_power_state(pdev, PCI_D3hot);
11445
Greg Rose059ff692016-05-16 10:26:38 -070011446 return retval;
Shannon Nelson9007bcc2013-11-26 10:49:23 +000011447}
11448
11449/**
11450 * i40e_resume - PCI callback for waking up from D3
11451 * @pdev: PCI device information struct
11452 **/
11453static int i40e_resume(struct pci_dev *pdev)
11454{
11455 struct i40e_pf *pf = pci_get_drvdata(pdev);
11456 u32 err;
11457
11458 pci_set_power_state(pdev, PCI_D0);
11459 pci_restore_state(pdev);
11460	/* pci_restore_state() clears dev->state_saved, so
11461 * call pci_save_state() again to restore it.
11462 */
11463 pci_save_state(pdev);
11464
11465 err = pci_enable_device_mem(pdev);
11466 if (err) {
Shannon Nelsonfb43201f2015-08-26 15:14:17 -040011467 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
Shannon Nelson9007bcc2013-11-26 10:49:23 +000011468 return err;
11469 }
11470 pci_set_master(pdev);
11471
11472 /* no wakeup events while running */
11473 pci_wake_from_d3(pdev, false);
11474
11475 /* handling the reset will rebuild the device state */
11476 if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
11477 clear_bit(__I40E_DOWN, &pf->state);
11478 rtnl_lock();
11479 i40e_reset_and_rebuild(pf, false);
11480 rtnl_unlock();
11481 }
11482
11483 return 0;
11484}
11485
11486#endif
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011487static const struct pci_error_handlers i40e_err_handler = {
11488 .error_detected = i40e_pci_error_detected,
11489 .slot_reset = i40e_pci_error_slot_reset,
11490 .resume = i40e_pci_error_resume,
11491};
11492
11493static struct pci_driver i40e_driver = {
11494 .name = i40e_driver_name,
11495 .id_table = i40e_pci_tbl,
11496 .probe = i40e_probe,
11497 .remove = i40e_remove,
Shannon Nelson9007bcc2013-11-26 10:49:23 +000011498#ifdef CONFIG_PM
11499 .suspend = i40e_suspend,
11500 .resume = i40e_resume,
11501#endif
11502 .shutdown = i40e_shutdown,
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011503 .err_handler = &i40e_err_handler,
11504 .sriov_configure = i40e_pci_sriov_configure,
11505};
11506
11507/**
11508 * i40e_init_module - Driver registration routine
11509 *
11510 * i40e_init_module is the first routine called when the driver is
11511 * loaded. All it does is register with the PCI subsystem.
11512 **/
11513static int __init i40e_init_module(void)
11514{
11515 pr_info("%s: %s - version %s\n", i40e_driver_name,
11516 i40e_driver_string, i40e_driver_version_str);
11517 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
Greg Rose96664482015-02-06 08:52:13 +000011518
Jesse Brandeburg2803b162015-12-22 14:25:08 -080011519	/* we will see if a single thread per module is enough for now;
11520	 * it can't be any worse than using the system workqueue, which
11521	 * was already single threaded
11522 */
Jacob Keller6992a6c2016-08-04 11:37:01 -070011523 i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
11524 i40e_driver_name);
Jesse Brandeburg2803b162015-12-22 14:25:08 -080011525 if (!i40e_wq) {
11526 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
11527 return -ENOMEM;
11528 }
11529
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011530 i40e_dbg_init();
11531 return pci_register_driver(&i40e_driver);
11532}
11533module_init(i40e_init_module);
11534
11535/**
11536 * i40e_exit_module - Driver exit cleanup routine
11537 *
11538 * i40e_exit_module is called just before the driver is removed
11539 * from memory.
11540 **/
11541static void __exit i40e_exit_module(void)
11542{
11543 pci_unregister_driver(&i40e_driver);
Jesse Brandeburg2803b162015-12-22 14:25:08 -080011544 destroy_workqueue(i40e_wq);
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011545 i40e_dbg_exit();
Jesse Brandeburg41c445f2013-09-11 08:39:46 +000011546}
11547module_exit(i40e_exit_module);