/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* Local includes */
#include "i40e.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 9
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." \
	__stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";

/* a few forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
	{PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		} else {
			/* not enough, so skip over it and continue looking */
			i += j;
		}
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
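
/* Illustrative sketch, not part of the driver build: how a caller might
 * use the lump allocator above.  The pile argument and owner id below are
 * assumptions invented for the example; real callers hand in a tracking
 * pile (e.g. a queue or interrupt-vector pile) owned by the pf.
 */
#if 0
static int example_use_lump(struct i40e_pf *pf,
			    struct i40e_lump_tracking *pile)
{
	u16 needed = 4;		/* contiguous entries wanted */
	u16 id = 0x0001;	/* hypothetical owner id, < I40E_PILE_VALID_BIT */
	int base = i40e_get_lump(pf, pile, needed, id);

	if (base < 0)
		return base;	/* -EINVAL on bad args, -ENOMEM if no room */

	/* entries base .. base + needed - 1 now carry this owner id */

	return i40e_put_lump(pile, base, id);	/* returns the count freed */
}
#endif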

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		schedule_work(&pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 0;
	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d\n",
		    pf->tx_timeout_recovery_level);

	switch (pf->tx_timeout_recovery_level) {
	case 0:
		/* disable and re-enable queues for the VSI */
		if (in_interrupt()) {
			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
		} else {
			i40e_vsi_reinit_locked(vsi);
		}
		break;
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		i40e_down(vsi);
		break;
	}
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *storage)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	*storage = *i40e_get_vsi_stats_struct(vsi);

	return storage;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings)
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i].rx_stats, 0,
			       sizeof(vsi->rx_rings[i].rx_stats));
			memset(&vsi->tx_rings[i].tx_stats, 0,
			       sizeof(vsi->tx_rings[i].tx_stats));
		}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given pf
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_QEMU_DEVICE_ID) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
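
/* Worked example of the roll-over branch above (illustrative): if the
 * saved *offset is 0xFFFFFFFFFF00 and the 48-bit counter wraps so that
 * new_data reads 0x40, then new_data < *offset and
 *   *stat = (0x40 + (1ULL << 48)) - 0xFFFFFFFFFF00 = 0x140,
 * i.e. the 0x100 events up through the wrap plus 0x40 after it, and the
 * final mask keeps the result within 48 bits.
 */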

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */
	int idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u64 xoff = 0;
	u16 i, v;

	if ((hw->fc.current_mode != I40E_FC_FULL) &&
	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
		return;

	xoff = nsd->link_xoff_rx;
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);

	/* No new LFC xoff rx */
	if (!(nsd->link_xoff_rx - xoff))
		return;

	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = &vsi->tx_rings[i];
			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
		}
	}
}

/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* See if DCB enabled with PFC TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
	    !(dcb_cfg->pfc.pfcenable)) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = &vsi->tx_rings[i];

			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}

/**
 * i40e_update_stats - Update the board statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it all out here in a central place.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;	/* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */
	u32 tx_restart, tx_busy;
	u32 rx_page, rx_buf;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	int i;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = 0;
	rx_page = 0;
	rx_buf = 0;
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		struct i40e_ring *p;

		p = &vsi->rx_rings[q];
		rx_b += p->rx_stats.bytes;
		rx_p += p->rx_stats.packets;
		rx_buf += p->rx_stats.alloc_rx_buff_failed;
		rx_page += p->rx_stats.alloc_rx_page_failed;

		p = &vsi->tx_rings[q];
		tx_b += p->tx_stats.bytes;
		tx_p += p->tx_stats.packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
	}
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	i40e_update_eth_stats(vsi);
	/* update netdev stats from eth stats */
	ons->rx_errors = oes->rx_errors;
	ns->rx_errors = es->rx_errors;
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* Get the port data only if this is the main PF VSI */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		struct i40e_hw_port_stats *nsd = &pf->stats;
		struct i40e_hw_port_stats *osd = &pf->stats_offsets;

		i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
				   I40E_GLPRT_GORCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
		i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
				   I40E_GLPRT_GOTCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
		i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_discards,
				   &nsd->eth.rx_discards);
		i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.tx_discards,
				   &nsd->eth.tx_discards);
		i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
				   I40E_GLPRT_MPRCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_multicast,
				   &nsd->eth.rx_multicast);

		i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_dropped_link_down,
				   &nsd->tx_dropped_link_down);

		i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->crc_errors, &nsd->crc_errors);
		ns->rx_crc_errors = nsd->crc_errors;

		i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->illegal_bytes, &nsd->illegal_bytes);
		ns->rx_errors = nsd->crc_errors + nsd->illegal_bytes;

		i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->mac_local_faults,
				   &nsd->mac_local_faults);
		i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->mac_remote_faults,
				   &nsd->mac_remote_faults);

		i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_length_errors,
				   &nsd->rx_length_errors);
		ns->rx_length_errors = nsd->rx_length_errors;

		i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xon_rx, &nsd->link_xon_rx);
		i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xon_tx, &nsd->link_xon_tx);
		i40e_update_prio_xoff_rx(pf);	/* handles I40E_GLPRT_LXOFFRXC */
		i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xoff_tx, &nsd->link_xoff_tx);

		for (i = 0; i < 8; i++) {
			i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_rx[i],
					   &nsd->priority_xon_rx[i]);
			i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_tx[i],
					   &nsd->priority_xon_tx[i]);
			i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xoff_tx[i],
					   &nsd->priority_xoff_tx[i]);
			i40e_stat_update32(hw,
					   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_2_xoff[i],
					   &nsd->priority_xon_2_xoff[i]);
		}

		i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
				   I40E_GLPRT_PRC64L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_64, &nsd->rx_size_64);
		i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
				   I40E_GLPRT_PRC127L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_127, &nsd->rx_size_127);
		i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
				   I40E_GLPRT_PRC255L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_255, &nsd->rx_size_255);
		i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
				   I40E_GLPRT_PRC511L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_511, &nsd->rx_size_511);
		i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
				   I40E_GLPRT_PRC1023L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_1023, &nsd->rx_size_1023);
		i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
				   I40E_GLPRT_PRC1522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_1522, &nsd->rx_size_1522);
		i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
				   I40E_GLPRT_PRC9522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_big, &nsd->rx_size_big);

		i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
				   I40E_GLPRT_PTC64L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_64, &nsd->tx_size_64);
		i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
				   I40E_GLPRT_PTC127L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_127, &nsd->tx_size_127);
		i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
				   I40E_GLPRT_PTC255L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_255, &nsd->tx_size_255);
		i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
				   I40E_GLPRT_PTC511L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_511, &nsd->tx_size_511);
		i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
				   I40E_GLPRT_PTC1023L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_1023, &nsd->tx_size_1023);
		i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
				   I40E_GLPRT_PTC1522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_1522, &nsd->tx_size_1522);
		i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
				   I40E_GLPRT_PTC9522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_big, &nsd->tx_size_big);

		i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_undersize, &nsd->rx_undersize);
		i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_fragments, &nsd->rx_fragments);
		i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_oversize, &nsd->rx_oversize);
		i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_jabber, &nsd->rx_jabber);
	}

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* Only a vlan of -1 on every filter denotes not being in vlan
	 * mode, so we have to walk the whole list to be sure
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0)
			return true;
	}

	return false;
}

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a vf
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		memcpy(f->macaddr, macaddr, ETH_ALEN);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}

/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by vf or netdev */
		int min_f = 0;
		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}
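
/* Illustrative sketch, not part of the driver build: the usual pairing of
 * the helpers above.  Filters only reach the firmware once
 * i40e_sync_vsi_filters() pushes the changed list, which is also the
 * pattern i40e_set_mac() below follows.  The function and parameter names
 * here are made up for the example.
 */
#if 0
static int example_swap_filter(struct i40e_vsi *vsi, u8 *old_mac, u8 *new_mac)
{
	/* add the new address before deleting the old one so that no
	 * packets are dropped in between
	 */
	if (!i40e_add_filter(vsi, new_mac, I40E_VLAN_ANY, false, false))
		return -ENOMEM;
	i40e_del_filter(vsi, old_mac, I40E_VLAN_ANY, false, false);
	return i40e_sync_vsi_filters(vsi);
}
#endif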

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;
		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_ONLY,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}

		memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
	}

	/* In order to be sure to not drop any packets, add the new address
	 * then delete the old one.
	 */
	f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
	if (!f)
		return -ENOMEM;

	i40e_sync_vsi_filters(vsi);
	i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
	i40e_sync_vsi_filters(vsi);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & (1 << i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
			int pow, num_qps;

			vsi->tc_config.tc_info[i].qoffset = offset;
			switch (vsi->type) {
			case I40E_VSI_MAIN:
				if (i == 0)
					qcount = pf->rss_size;
				else
					qcount = pf->num_tc_qps;
				vsi->tc_config.tc_info[i].qcount = qcount;
				break;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = vsi->alloc_queue_pairs;
				vsi->tc_config.tc_info[i].qcount = qcount;
				WARN_ON(i != 0);
				break;
			}

			/* find the power-of-2 of the number of queue pairs */
			num_qps = vsi->tc_config.tc_info[i].qcount;
			pow = 0;
			while (num_qps &&
			       ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += vsi->tc_config.tc_info[i].qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
		    cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
			    cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
		    cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
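
/* Worked example of the qmap encoding above (illustrative): a TC at queue
 * offset 8 with qcount 6 rounds up to the next power of two, so pow ends
 * at 3 (1 << 3 = 8 >= 6) and
 *   qmap = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *          (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT),
 * which the hardware reads as "this TC starts at queue 8 and spans
 * 2^3 queues".
 */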

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		bool found = false;

		if (!f->is_netdev)
			continue;

		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		} else {
			netdev_for_each_uc_addr(uca, netdev) {
				if (ether_addr_equal(uca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}

			for_each_dev_addr(netdev, ha) {
				if (ether_addr_equal(ha->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		}
		if (!found)
			i40e_del_filter(
			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
	}

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	bool promisc_forced_on = false;
	bool add_happened = false;
	int filter_list_len = 0;
	u32 changed_flags = 0;
	i40e_status aq_ret = 0;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	u16 cmd_flags;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		filter_list_len = pf->hw.aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kcalloc(filter_list_len,
			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
			    GFP_KERNEL);
		if (!del_list) {
			/* don't leave the BUSY bit set on the error path */
			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
			return -ENOMEM;
		}

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter != 0)
				continue;
			f->changed = false;
			cmd_flags = 0;

			/* add to delete list */
			memcpy(del_list[num_del].mac_addr,
			       f->macaddr, ETH_ALEN);
			del_list[num_del].vlan_tag =
				cpu_to_le16((u16)(f->vlan ==
					    I40E_VLAN_ANY ? 0 : f->vlan));

			/* vlan0 as wild card to allow packets from all vlans */
			if (f->vlan == I40E_VLAN_ANY ||
			    (vsi->netdev && !(vsi->netdev->features &
					      NETIF_F_HW_VLAN_CTAG_FILTER)))
				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* unlink from filter list */
			list_del(&f->list);
			kfree(f);

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
					    vsi->seid, del_list, num_del,
					    NULL);
				num_del = 0;
				memset(del_list, 0, sizeof(*del_list));

				if (aq_ret)
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
						 aq_ret,
						 pf->hw.aq.asq_last_status);
			}
		}
		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
							del_list, num_del, NULL);
			num_del = 0;

			if (aq_ret)
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error, err %d, aq_err %d\n",
					 aq_ret, pf->hw.aq.asq_last_status);
		}

		kfree(del_list);
		del_list = NULL;

		/* do all the adds now */
		filter_list_len = pf->hw.aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kcalloc(filter_list_len,
			       sizeof(struct i40e_aqc_add_macvlan_element_data),
			       GFP_KERNEL);
		if (!add_list) {
			/* don't leave the BUSY bit set on the error path */
			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
			return -ENOMEM;
		}

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter == 0)
				continue;
			f->changed = false;
			add_happened = true;
			cmd_flags = 0;

			/* add to add array */
			memcpy(add_list[num_add].mac_addr,
			       f->macaddr, ETH_ALEN);
			add_list[num_add].vlan_tag =
				cpu_to_le16(
				 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
			add_list[num_add].queue_number = 0;

			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;

			/* vlan0 as wild card to allow packets from all vlans */
			if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
			    !(vsi->netdev->features &
			      NETIF_F_HW_VLAN_CTAG_FILTER)))
				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
							     add_list, num_add,
							     NULL);
				num_add = 0;

				if (aq_ret)
					break;
				memset(add_list, 0, sizeof(*add_list));
			}
		}
		if (num_add) {
			aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
						     add_list, num_add, NULL);
			num_add = 0;
		}
		kfree(add_list);
		add_list = NULL;

		if (add_happened && (!aq_ret)) {
			/* do nothing */;
		} else if (add_happened && (aq_ret)) {
			dev_info(&pf->pdev->dev,
				 "add filter failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
				      &vsi->state)) {
				promisc_forced_on = true;
				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state);
				dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
			}
		}
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;
		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}
	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
		bool cur_promisc;
		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state));
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
							     vsi->seid,
							     cur_promisc, NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set uni promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}

	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
	return 0;
}

/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
			i40e_sync_vsi_filters(pf->vsi[v]);
	}
}

/**
 * i40e_change_mtu - NDO callback to change the Maximum Transmission Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	struct i40e_vsi *vsi = np->vsi;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
		return -EINVAL;

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);

	return 0;
}

/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
		return;  /* already enabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}

/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
		return;  /* already disabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}

/**
 * i40e_vlan_rx_register - Setup or shutdown vlan offload
 * @netdev: network interface to be adjusted
 * @features: netdev features to test if VLAN offload is enabled or not
 **/
static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);
}

/**
 * i40e_vsi_add_vlan - Add vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only, -1 = any)
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	bool is_netdev, is_vf;
	int ret;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(vsi->netdev);

	if (is_netdev) {
		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
					is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, vsi->netdev->dev_addr);
			return -ENOMEM;
		}
	}

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			return -ENOMEM;
		}
	}

	ret = i40e_sync_vsi_filters(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Could not sync filters for vid %d\n", vid);
		return ret;
	}

	/* Now if we add a vlan tag, make sure to check if it is the first
	 * tag (i.e. a filter with the -1 "any" tag still exists) and if so
	 * replace that -1 "tag" with 0, so we then accept untagged plus the
	 * specified tagged traffic (and not all tagged and untagged)
	 */
	if (vid > 0) {
		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
						  I40E_VLAN_ANY,
						  is_vf, is_netdev)) {
			i40e_del_filter(vsi, vsi->netdev->dev_addr,
					I40E_VLAN_ANY, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 vsi->netdev->dev_addr);
				return -ENOMEM;
			}
		}

		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					     is_vf, is_netdev)) {
				i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
						is_vf, is_netdev);
				add_f = i40e_add_filter(vsi, f->macaddr,
							0, is_vf, is_netdev);
				if (!add_f) {
					dev_info(&vsi->back->pdev->dev,
						 "Could not add filter 0 for %pM\n",
						 f->macaddr);
					return -ENOMEM;
				}
			}
		}
		ret = i40e_sync_vsi_filters(vsi);
	}

	return ret;
}
1785
1786/**
1787 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
1788 * @vsi: the vsi being configured
1789 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
1790 *
1791 * Return: 0 on success or negative otherwise
1792 **/
1793int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
1794{
1795 struct net_device *netdev = vsi->netdev;
1796 struct i40e_mac_filter *f, *add_f;
1797 bool is_vf, is_netdev;
1798 int filter_count = 0;
1799 int ret;
1800
1801 is_vf = (vsi->type == I40E_VSI_SRIOV);
1802 is_netdev = !!(netdev);
1803
1804 if (is_netdev)
1805 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
1806
1807 list_for_each_entry(f, &vsi->mac_filter_list, list)
1808 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
1809
1810 ret = i40e_sync_vsi_filters(vsi);
1811 if (ret) {
1812 dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
1813 return ret;
1814 }
1815
1816 /* go through all the filters for this VSI and if there is only
1817 * vid == 0 it means there are no other filters, so vid 0 must
1818 * be replaced with -1. This signifies that we should from now
1819 * on accept any traffic (with any tag present, or untagged)
1820 */
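 /* e.g. if removing vid 5 leaves only (MAC, vid 0) filters behind,
 * filter_count stays 0 and the code below swaps them back to
 * (MAC, vid -1), the accept-everything filter */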
1821 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1822 if (is_netdev) {
1823 if (f->vlan &&
1824 ether_addr_equal(netdev->dev_addr, f->macaddr))
1825 filter_count++;
1826 }
1827
1828 if (f->vlan)
1829 filter_count++;
1830 }
1831
1832 if (!filter_count && is_netdev) {
1833 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
1834 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1835 is_vf, is_netdev);
1836 if (!f) {
1837 dev_info(&vsi->back->pdev->dev,
1838 "Could not add filter %d for %pM\n",
1839 I40E_VLAN_ANY, netdev->dev_addr);
1840 return -ENOMEM;
1841 }
1842 }
1843
1844 if (!filter_count) {
1845 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1846 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
1847 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1848 is_vf, is_netdev);
1849 if (!add_f) {
1850 dev_info(&vsi->back->pdev->dev,
1851 "Could not add filter %d for %pM\n",
1852 I40E_VLAN_ANY, f->macaddr);
1853 return -ENOMEM;
1854 }
1855 }
1856 }
1857
1858 return i40e_sync_vsi_filters(vsi);
1859}
1860
1861/**
1862 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
1863 * @netdev: network interface to be adjusted
1864 * @vid: vlan id to be added
1865 *
1866 * net_device_ops implementation for adding vlan ids
1867 **/
1868static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1869 __always_unused __be16 proto, u16 vid)
1870{
1871 struct i40e_netdev_priv *np = netdev_priv(netdev);
1872 struct i40e_vsi *vsi = np->vsi;
1873 int ret = 0;
1874
1875 if (vid > 4095)
1876 return -EINVAL;
1877
1878 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
1879
1880 /* If the network stack called us with vid = 0, we should
1881 * indicate to i40e_vsi_add_vlan() that we want to receive
1882 * any traffic (i.e. with any vlan tag, or untagged)
1883 */
1884 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
1885
1886 if (!ret && (vid < VLAN_N_VID))
1887 set_bit(vid, vsi->active_vlans);
1888
1889 return ret;
1890 }
1891
1892/**
1893 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
1894 * @netdev: network interface to be adjusted
1895 * @vid: vlan id to be removed
1896 *
1897 * net_device_ops implementation for removing vlan ids
1898 **/
1899static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1900 __always_unused __be16 proto, u16 vid)
1901{
1902 struct i40e_netdev_priv *np = netdev_priv(netdev);
1903 struct i40e_vsi *vsi = np->vsi;
1904
1905 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
1906
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001907 /* return code is ignored as there is nothing a user
1908 * can do about failure to remove and a log message was
Jesse Brandeburg078b5872013-09-25 23:41:14 +00001909 * already printed from the other function
Jesse Brandeburg41c445f2013-09-11 08:39:46 +00001910 */
1911 i40e_vsi_kill_vlan(vsi, vid);
1912
1913 clear_bit(vid, vsi->active_vlans);
1914
1915 return 0;
1916}
1917
1918/**
1919 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
1920 * @vsi: the vsi being brought back up
1921 **/
1922static void i40e_restore_vlan(struct i40e_vsi *vsi)
1923{
1924 u16 vid;
1925
1926 if (!vsi->netdev)
1927 return;
1928
1929 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
1930
1931 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
1932 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
1933 vid);
1934}
1935
1936/**
1937 * i40e_vsi_add_pvid - Add pvid for the VSI
1938 * @vsi: the vsi being adjusted
1939 * @vid: the vlan id to set as a PVID
1940 **/
1941 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
1942 {
1943 struct i40e_vsi_context ctxt;
1944 i40e_status aq_ret;
1945
1946 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1947 vsi->info.pvid = cpu_to_le16(vid);
1948 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
1949 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
1950
1951 ctxt.seid = vsi->seid;
1952 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1953 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1954 if (aq_ret) {
1955 dev_info(&vsi->back->pdev->dev,
1956 "%s: update vsi failed, aq_err=%d\n",
1957 __func__, vsi->back->hw.aq.asq_last_status);
1958 return -ENOENT;
1959 }
1960
1961 return 0;
1962 }
1963
1964/**
1965 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
1966 * @vsi: the vsi being adjusted
1967 *
1968 * Just use the vlan_rx_register() service to put it back to normal
1969 **/
1970void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
1971{
1972 vsi->info.pvid = 0;
1973 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
1974}
1975
1976/**
1977 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
1978 * @vsi: ptr to the VSI
1979 *
1980 * If this function returns with an error, then it's possible one or
1981 * more of the rings is populated (while the rest are not). It is the
1982 * callers duty to clean those orphaned rings.
1983 *
1984 * Return 0 on success, negative on failure
1985 **/
1986static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
1987{
1988 int i, err = 0;
1989
1990 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
1991 err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]);
1992
1993 return err;
1994}
1995
1996/**
1997 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
1998 * @vsi: ptr to the VSI
1999 *
2000 * Free VSI's transmit software resources
2001 **/
2002static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2003{
2004 int i;
2005
2006 for (i = 0; i < vsi->num_queue_pairs; i++)
2007 if (vsi->tx_rings[i].desc)
2008 i40e_free_tx_resources(&vsi->tx_rings[i]);
2009}
2010
2011/**
2012 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2013 * @vsi: ptr to the VSI
2014 *
2015 * If this function returns with an error, then it's possible one or
2016 * more of the rings is populated (while the rest are not). It is the
2017 * callers duty to clean those orphaned rings.
2018 *
2019 * Return 0 on success, negative on failure
2020 **/
2021static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2022{
2023 int i, err = 0;
2024
2025 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2026 err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]);
2027 return err;
2028}
2029
2030/**
2031 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2032 * @vsi: ptr to the VSI
2033 *
2034 * Free all receive software resources
2035 **/
2036static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2037{
2038 int i;
2039
2040 for (i = 0; i < vsi->num_queue_pairs; i++)
2041 if (vsi->rx_rings[i].desc)
2042 i40e_free_rx_resources(&vsi->rx_rings[i]);
2043}
2044
2045/**
2046 * i40e_configure_tx_ring - Configure a transmit ring context
2047 * @ring: The Tx ring to configure
2048 *
2049 * Configure the Tx descriptor ring in the HMC context.
2050 **/
2051static int i40e_configure_tx_ring(struct i40e_ring *ring)
2052{
2053 struct i40e_vsi *vsi = ring->vsi;
2054 u16 pf_q = vsi->base_queue + ring->queue_index;
2055 struct i40e_hw *hw = &vsi->back->hw;
2056 struct i40e_hmc_obj_txq tx_ctx;
2057 i40e_status err = 0;
2058 u32 qtx_ctl = 0;
2059
2060 /* some ATR related tx ring init */
2061 if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) {
2062 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2063 ring->atr_count = 0;
2064 } else {
2065 ring->atr_sample_rate = 0;
2066 }
2067
2068 /* initialize XPS */
2069 if (ring->q_vector && ring->netdev &&
2070 !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2071 netif_set_xps_queue(ring->netdev,
2072 &ring->q_vector->affinity_mask,
2073 ring->queue_index);
2074
2075 /* clear the context structure first */
2076 memset(&tx_ctx, 0, sizeof(tx_ctx));
2077
2078 tx_ctx.new_context = 1;
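 /* the HMC queue context holds the ring base address in 128-byte
 * units, hence the division below */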
2079 tx_ctx.base = (ring->dma / 128);
2080 tx_ctx.qlen = ring->count;
2081 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED |
2082 I40E_FLAG_FDIR_ATR_ENABLED));
2083
2084 /* As part of VSI creation/update, FW allocates certain
2085 * Tx arbitration queue sets for each TC enabled for
2086 * the VSI. The FW returns the handles to these queue
2087 * sets as part of the response buffer to Add VSI,
2088 * Update VSI, etc. AQ commands. It is expected that
2089 * these queue set handles be associated with the Tx
2090 * queues by the driver as part of the TX queue context
2091 * initialization. This has to be done regardless of
2092 * DCB as by default everything is mapped to TC0.
2093 */
2094 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2095 tx_ctx.rdylist_act = 0;
2096
2097 /* clear the context in the HMC */
2098 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2099 if (err) {
2100 dev_info(&vsi->back->pdev->dev,
2101 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2102 ring->queue_index, pf_q, err);
2103 return -ENOMEM;
2104 }
2105
2106 /* set the context in the HMC */
2107 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2108 if (err) {
2109 dev_info(&vsi->back->pdev->dev,
2110 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2111 ring->queue_index, pf_q, err);
2112 return -ENOMEM;
2113 }
2114
2115 /* Now associate this queue with this PCI function */
2116 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2117 qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
2118 & I40E_QTX_CTL_PF_INDX_MASK);
2119 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2120 i40e_flush(hw);
2121
2122 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2123
2124 /* cache tail off for easier writes later */
2125 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2126
2127 return 0;
2128}
2129
2130/**
2131 * i40e_configure_rx_ring - Configure a receive ring context
2132 * @ring: The Rx ring to configure
2133 *
2134 * Configure the Rx descriptor ring in the HMC context.
2135 **/
2136static int i40e_configure_rx_ring(struct i40e_ring *ring)
2137{
2138 struct i40e_vsi *vsi = ring->vsi;
2139 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2140 u16 pf_q = vsi->base_queue + ring->queue_index;
2141 struct i40e_hw *hw = &vsi->back->hw;
2142 struct i40e_hmc_obj_rxq rx_ctx;
2143 i40e_status err = 0;
2144
2145 ring->state = 0;
2146
2147 /* clear the context structure first */
2148 memset(&rx_ctx, 0, sizeof(rx_ctx));
2149
2150 ring->rx_buf_len = vsi->rx_buf_len;
2151 ring->rx_hdr_len = vsi->rx_hdr_len;
2152
2153 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2154 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2155
2156 rx_ctx.base = (ring->dma / 128);
2157 rx_ctx.qlen = ring->count;
2158
2159 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2160 set_ring_16byte_desc_enabled(ring);
2161 rx_ctx.dsize = 0;
2162 } else {
2163 rx_ctx.dsize = 1;
2164 }
2165
2166 rx_ctx.dtype = vsi->dtype;
2167 if (vsi->dtype) {
2168 set_ring_ps_enabled(ring);
2169 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
2170 I40E_RX_SPLIT_IP |
2171 I40E_RX_SPLIT_TCP_UDP |
2172 I40E_RX_SPLIT_SCTP;
2173 } else {
2174 rx_ctx.hsplit_0 = 0;
2175 }
2176
2177 rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2178 (chain_len * ring->rx_buf_len));
2179 rx_ctx.tphrdesc_ena = 1;
2180 rx_ctx.tphwdesc_ena = 1;
2181 rx_ctx.tphdata_ena = 1;
2182 rx_ctx.tphhead_ena = 1;
2183 rx_ctx.lrxqthresh = 2;
2184 rx_ctx.crcstrip = 1;
2185 rx_ctx.l2tsel = 1;
2186 rx_ctx.showiv = 1;
2187
2188 /* clear the context in the HMC */
2189 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2190 if (err) {
2191 dev_info(&vsi->back->pdev->dev,
2192 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2193 ring->queue_index, pf_q, err);
2194 return -ENOMEM;
2195 }
2196
2197 /* set the context in the HMC */
2198 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2199 if (err) {
2200 dev_info(&vsi->back->pdev->dev,
2201 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2202 ring->queue_index, pf_q, err);
2203 return -ENOMEM;
2204 }
2205
2206 /* cache tail for quicker writes, and clear the reg before use */
2207 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2208 writel(0, ring->tail);
2209
2210 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
2211
2212 return 0;
2213}
2214
2215/**
2216 * i40e_vsi_configure_tx - Configure the VSI for Tx
2217 * @vsi: VSI structure describing this set of rings and resources
2218 *
2219 * Configure the Tx VSI for operation.
2220 **/
2221static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2222{
2223 int err = 0;
2224 u16 i;
2225
2226 for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++)
2227 err = i40e_configure_tx_ring(&vsi->tx_rings[i]);
2228
2229 return err;
2230}
2231
2232/**
2233 * i40e_vsi_configure_rx - Configure the VSI for Rx
2234 * @vsi: the VSI being configured
2235 *
2236 * Configure the Rx VSI for operation.
2237 **/
2238static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2239{
2240 int err = 0;
2241 u16 i;
2242
2243 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2244 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2245 + ETH_FCS_LEN + VLAN_HLEN;
2246 else
2247 vsi->max_frame = I40E_RXBUFFER_2048;
2248
2249 /* figure out correct receive buffer length */
2250 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2251 I40E_FLAG_RX_PS_ENABLED)) {
2252 case I40E_FLAG_RX_1BUF_ENABLED:
2253 vsi->rx_hdr_len = 0;
2254 vsi->rx_buf_len = vsi->max_frame;
2255 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2256 break;
2257 case I40E_FLAG_RX_PS_ENABLED:
2258 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2259 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2260 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2261 break;
2262 default:
2263 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2264 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2265 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2266 break;
2267 }
2268
2269 /* round up for the chip's needs */
2270 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2271 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
2272 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2273 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
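 /* e.g. data buffer lengths are kept in 128-byte
 * (1 << I40E_RXQ_CTX_DBUFF_SHIFT) units, so a 1522-byte buffer
 * request is rounded up to 1536 bytes */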
2274
2275 /* set up individual rings */
2276 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2277 err = i40e_configure_rx_ring(&vsi->rx_rings[i]);
2278
2279 return err;
2280}
2281
2282/**
2283 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2284 * @vsi: ptr to the VSI
2285 **/
2286static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2287{
2288 u16 qoffset, qcount;
2289 int i, n;
2290
2291 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2292 return;
2293
2294 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2295 if (!(vsi->tc_config.enabled_tc & (1 << n)))
2296 continue;
2297
2298 qoffset = vsi->tc_config.tc_info[n].qoffset;
2299 qcount = vsi->tc_config.tc_info[n].qcount;
2300 for (i = qoffset; i < (qoffset + qcount); i++) {
2301 struct i40e_ring *rx_ring = &vsi->rx_rings[i];
2302 struct i40e_ring *tx_ring = &vsi->tx_rings[i];
2303 rx_ring->dcb_tc = n;
2304 tx_ring->dcb_tc = n;
2305 }
2306 }
2307}
2308
2309/**
2310 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2311 * @vsi: ptr to the VSI
2312 **/
2313static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2314{
2315 if (vsi->netdev)
2316 i40e_set_rx_mode(vsi->netdev);
2317}
2318
2319/**
2320 * i40e_vsi_configure - Set up the VSI for action
2321 * @vsi: the VSI being configured
2322 **/
2323static int i40e_vsi_configure(struct i40e_vsi *vsi)
2324{
2325 int err;
2326
2327 i40e_set_vsi_rx_mode(vsi);
2328 i40e_restore_vlan(vsi);
2329 i40e_vsi_config_dcb_rings(vsi);
2330 err = i40e_vsi_configure_tx(vsi);
2331 if (!err)
2332 err = i40e_vsi_configure_rx(vsi);
2333
2334 return err;
2335}
2336
2337/**
2338 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2339 * @vsi: the VSI being configured
2340 **/
2341static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2342{
2343 struct i40e_pf *pf = vsi->back;
2344 struct i40e_q_vector *q_vector;
2345 struct i40e_hw *hw = &pf->hw;
2346 u16 vector;
2347 int i, q;
2348 u32 val;
2349 u32 qp;
2350
2351 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
2352 * and PFINT_LNKLSTn registers, e.g.:
2353 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
2354 */
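 /* e.g. the first queue vector (vector == 1) programs
 * PFINT_ITRN(..., 0) and PFINT_LNKLSTN(0); MSI-X entry 0 is left
 * for the non-queue causes set up by i40e_setup_misc_vector() */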
2355 qp = vsi->base_queue;
2356 vector = vsi->base_vector;
2357 q_vector = vsi->q_vectors;
2358 for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) {
2359 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2360 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2361 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2362 q_vector->rx.itr);
2363 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2364 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2365 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2366 q_vector->tx.itr);
2367
2368 /* Linked list for the queuepairs assigned to this vector */
2369 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2370 for (q = 0; q < q_vector->num_ringpairs; q++) {
2371 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2372 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2373 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2374 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
2375 (I40E_QUEUE_TYPE_TX
2376 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2377
2378 wr32(hw, I40E_QINT_RQCTL(qp), val);
2379
2380 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2381 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2382 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2383 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
2384 (I40E_QUEUE_TYPE_RX
2385 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2386
2387 /* Terminate the linked list */
2388 if (q == (q_vector->num_ringpairs - 1))
2389 val |= (I40E_QUEUE_END_OF_LIST
2390 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2391
2392 wr32(hw, I40E_QINT_TQCTL(qp), val);
2393 qp++;
2394 }
2395 }
2396
2397 i40e_flush(hw);
2398}
2399
2400/**
2401 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2402 * @hw: ptr to the hardware info
2403 **/
2404static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
2405{
2406 u32 val;
2407
2408 /* clear things first */
2409 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2410 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2411
2412 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2413 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2414 I40E_PFINT_ICR0_ENA_GRST_MASK |
2415 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2416 I40E_PFINT_ICR0_ENA_GPIO_MASK |
2417 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
2418 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2419 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2420 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2421
2422 wr32(hw, I40E_PFINT_ICR0_ENA, val);
2423
2424 /* SW_ITR_IDX = 0, but don't change INTENA */
2425 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2426 I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2427
2428 /* OTHER_ITR_IDX = 0 */
2429 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2430}
2431
2432/**
2433 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2434 * @vsi: the VSI being configured
2435 **/
2436static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2437{
2438 struct i40e_q_vector *q_vector = vsi->q_vectors;
2439 struct i40e_pf *pf = vsi->back;
2440 struct i40e_hw *hw = &pf->hw;
2441 u32 val;
2442
2443 /* set the ITR configuration */
2444 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2445 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2446 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2447 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2448 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2449 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2450
2451 i40e_enable_misc_int_causes(hw);
2452
2453 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2454 wr32(hw, I40E_PFINT_LNKLST0, 0);
2455
2456 /* Associate the queue pair to the vector and enable the q int */
2457 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2458 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2459 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2460
2461 wr32(hw, I40E_QINT_RQCTL(0), val);
2462
2463 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2464 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2465 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2466
2467 wr32(hw, I40E_QINT_TQCTL(0), val);
2468 i40e_flush(hw);
2469}
2470
2471/**
2472 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2473 * @pf: board private structure
2474 **/
2475static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2476{
2477 struct i40e_hw *hw = &pf->hw;
2478 u32 val;
2479
2480 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2481 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2482 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2483
2484 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2485 i40e_flush(hw);
2486}
2487
2488/**
2489 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2490 * @vsi: pointer to a vsi
2491 * @vector: enable a particular Hw Interrupt vector
2492 **/
2493void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2494{
2495 struct i40e_pf *pf = vsi->back;
2496 struct i40e_hw *hw = &pf->hw;
2497 u32 val;
2498
2499 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2500 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2501 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2502 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2503 i40e_flush(hw);
2504}
2505
2506/**
2507 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2508 * @irq: interrupt number
2509 * @data: pointer to a q_vector
2510 **/
2511static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2512{
2513 struct i40e_q_vector *q_vector = data;
2514
2515 if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
2516 return IRQ_HANDLED;
2517
2518 napi_schedule(&q_vector->napi);
2519
2520 return IRQ_HANDLED;
2521}
2522
2523/**
2524 * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings
2525 * @irq: interrupt number
2526 * @data: pointer to a q_vector
2527 **/
2528static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
2529{
2530 struct i40e_q_vector *q_vector = data;
2531
2532 if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
2533 return IRQ_HANDLED;
2534
2535 pr_info("fdir ring cleaning needed\n");
2536
2537 return IRQ_HANDLED;
2538}
2539
2540/**
2541 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
2542 * @vsi: the VSI being configured
2543 * @basename: name for the vector
2544 *
2545 * Allocates MSI-X vectors and requests interrupts from the kernel.
2546 **/
2547static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2548{
2549 int q_vectors = vsi->num_q_vectors;
2550 struct i40e_pf *pf = vsi->back;
2551 int base = vsi->base_vector;
2552 int rx_int_idx = 0;
2553 int tx_int_idx = 0;
2554 int vector, err;
2555
2556 for (vector = 0; vector < q_vectors; vector++) {
2557 struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]);
2558
2559 if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
2560 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2561 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2562 tx_int_idx++;
2563 } else if (q_vector->rx.ring[0]) {
2564 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2565 "%s-%s-%d", basename, "rx", rx_int_idx++);
2566 } else if (q_vector->tx.ring[0]) {
2567 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2568 "%s-%s-%d", basename, "tx", tx_int_idx++);
2569 } else {
2570 /* skip this unused q_vector */
2571 continue;
2572 }
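 /* e.g. with a hypothetical basename of "i40e-eth0", the names
 * built above come out as "i40e-eth0-TxRx-0" for a vector serving
 * both rings, "i40e-eth0-rx-0" for an rx-only vector, and so on */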
2573 err = request_irq(pf->msix_entries[base + vector].vector,
2574 vsi->irq_handler,
2575 0,
2576 q_vector->name,
2577 q_vector);
2578 if (err) {
2579 dev_info(&pf->pdev->dev,
2580 "%s: request_irq failed, error: %d\n",
2581 __func__, err);
2582 goto free_queue_irqs;
2583 }
2584 /* assign the mask for this irq */
2585 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2586 &q_vector->affinity_mask);
2587 }
2588
2589 return 0;
2590
2591free_queue_irqs:
2592 while (vector) {
2593 vector--;
2594 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2595 NULL);
2596 free_irq(pf->msix_entries[base + vector].vector,
2597 &(vsi->q_vectors[vector]));
2598 }
2599 return err;
2600}
2601
2602/**
2603 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
2604 * @vsi: the VSI being un-configured
2605 **/
2606static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
2607{
2608 struct i40e_pf *pf = vsi->back;
2609 struct i40e_hw *hw = &pf->hw;
2610 int base = vsi->base_vector;
2611 int i;
2612
2613 for (i = 0; i < vsi->num_queue_pairs; i++) {
2614 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0);
2615 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0);
2616 }
2617
2618 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2619 for (i = vsi->base_vector;
2620 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2621 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
2622
2623 i40e_flush(hw);
2624 for (i = 0; i < vsi->num_q_vectors; i++)
2625 synchronize_irq(pf->msix_entries[i + base].vector);
2626 } else {
2627 /* Legacy and MSI mode - this stops all interrupt handling */
2628 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
2629 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
2630 i40e_flush(hw);
2631 synchronize_irq(pf->pdev->irq);
2632 }
2633}
2634
2635/**
2636 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
2637 * @vsi: the VSI being configured
2638 **/
2639static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
2640{
2641 struct i40e_pf *pf = vsi->back;
2642 int i;
2643
2644 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2645 for (i = vsi->base_vector;
2646 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2647 i40e_irq_dynamic_enable(vsi, i);
2648 } else {
2649 i40e_irq_dynamic_enable_icr0(pf);
2650 }
2651
2652 return 0;
2653}
2654
2655/**
2656 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
2657 * @pf: board private structure
2658 **/
2659static void i40e_stop_misc_vector(struct i40e_pf *pf)
2660{
2661 /* Disable ICR 0 */
2662 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
2663 i40e_flush(&pf->hw);
2664}
2665
2666/**
2667 * i40e_intr - MSI/Legacy and non-queue interrupt handler
2668 * @irq: interrupt number
2669 * @data: pointer to the PF structure
2670 *
2671 * This is the handler used for all MSI/Legacy interrupts, and deals
2672 * with both queue and non-queue interrupts. This is also used in
2673 * MSIX mode to handle the non-queue interrupts.
2674 **/
2675static irqreturn_t i40e_intr(int irq, void *data)
2676{
2677 struct i40e_pf *pf = (struct i40e_pf *)data;
2678 struct i40e_hw *hw = &pf->hw;
2679 u32 icr0, icr0_remaining;
2680 u32 val, ena_mask;
2681
2682 icr0 = rd32(hw, I40E_PFINT_ICR0);
2683
2684 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
2685 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
2686 return IRQ_NONE;
2687
2688 val = rd32(hw, I40E_PFINT_DYN_CTL0);
2689 val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
2690 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2691
2692 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
2693
2694 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
2695 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
2696
2697 /* temporarily disable queue cause for NAPI processing */
2698 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
2699 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2700 wr32(hw, I40E_QINT_RQCTL(0), qval);
2701
2702 qval = rd32(hw, I40E_QINT_TQCTL(0));
2703 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
2704 wr32(hw, I40E_QINT_TQCTL(0), qval);
2705 i40e_flush(hw);
2706
2707 if (!test_bit(__I40E_DOWN, &pf->state))
2708 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi);
2709 }
2710
2711 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
2712 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2713 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
2714 }
2715
2716 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
2717 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2718 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
2719 }
2720
2721 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
2722 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
2723 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
2724 }
2725
2726 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
2727 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
2728 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
2729 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
2730 val = rd32(hw, I40E_GLGEN_RSTAT);
2731 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
2732 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
2733 if (val & I40E_RESET_CORER)
2734 pf->corer_count++;
2735 else if (val & I40E_RESET_GLOBR)
2736 pf->globr_count++;
2737 else if (val & I40E_RESET_EMPR)
2738 pf->empr_count++;
2739 }
2740
2741 /* If a critical error is pending we have no choice but to reset the
2742 * device.
2743 * Report and mask out any remaining unexpected interrupts.
2744 */
2745 icr0_remaining = icr0 & ena_mask;
2746 if (icr0_remaining) {
2747 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
2748 icr0_remaining);
2749 if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) ||
2750 (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
2751 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
2752 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
2753 (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
2754 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
2755 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
2756 } else {
2757 dev_info(&pf->pdev->dev, "device will be reset\n");
2758 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
2759 i40e_service_event_schedule(pf);
2760 }
2761 }
2762 ena_mask &= ~icr0_remaining;
2763 }
2764
2765 /* re-enable interrupt causes */
2766 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
2767 i40e_flush(hw);
2768 if (!test_bit(__I40E_DOWN, &pf->state)) {
2769 i40e_service_event_schedule(pf);
2770 i40e_irq_dynamic_enable_icr0(pf);
2771 }
2772
2773 return IRQ_HANDLED;
2774}
2775
2776/**
2777 * map_vector_to_rxq - Assigns the Rx queue to the vector
2778 * @vsi: the VSI being configured
2779 * @v_idx: vector index
2780 * @r_idx: rx queue index
2781 **/
2782static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
2783{
2784 struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
2785 struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
2786
2787 rx_ring->q_vector = q_vector;
2788 q_vector->rx.ring[q_vector->rx.count] = rx_ring;
2789 q_vector->rx.count++;
2790 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2791 q_vector->vsi = vsi;
2792}
2793
2794/**
2795 * map_vector_to_txq - Assigns the Tx queue to the vector
2796 * @vsi: the VSI being configured
2797 * @v_idx: vector index
2798 * @t_idx: tx queue index
2799 **/
2800static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
2801{
2802 struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
2803 struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
2804
2805 tx_ring->q_vector = q_vector;
2806 q_vector->tx.ring[q_vector->tx.count] = tx_ring;
2807 q_vector->tx.count++;
2808 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2809 q_vector->num_ringpairs++;
2810 q_vector->vsi = vsi;
2811}
2812
2813/**
2814 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
2815 * @vsi: the VSI being configured
2816 *
2817 * This function maps descriptor rings to the queue-specific vectors
2818 * we were allotted through the MSI-X enabling code. Ideally, we'd have
2819 * one vector per queue pair, but on a constrained vector budget, we
2820 * group the queue pairs as "efficiently" as possible.
2821 **/
2822static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
2823{
2824 int qp_remaining = vsi->num_queue_pairs;
2825 int q_vectors = vsi->num_q_vectors;
2826 int qp_per_vector;
2827 int v_start = 0;
2828 int qp_idx = 0;
2829
2830 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
2831 * group them so there are multiple queues per vector.
2832 */
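 /* Worked example: 8 queue pairs on 3 vectors assigns
 * DIV_ROUND_UP(8, 3) = 3 pairs to the first vector,
 * DIV_ROUND_UP(5, 2) = 3 to the second, and the remaining 2 to
 * the third */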
2833 for (; v_start < q_vectors && qp_remaining; v_start++) {
2834 qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
2835 for (; qp_per_vector;
2836 qp_per_vector--, qp_idx++, qp_remaining--) {
2837 map_vector_to_rxq(vsi, v_start, qp_idx);
2838 map_vector_to_txq(vsi, v_start, qp_idx);
2839 }
2840 }
2841}
2842
2843/**
2844 * i40e_vsi_request_irq - Request IRQ from the OS
2845 * @vsi: the VSI being configured
2846 * @basename: name for the vector
2847 **/
2848static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
2849{
2850 struct i40e_pf *pf = vsi->back;
2851 int err;
2852
2853 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
2854 err = i40e_vsi_request_irq_msix(vsi, basename);
2855 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
2856 err = request_irq(pf->pdev->irq, i40e_intr, 0,
2857 pf->misc_int_name, pf);
2858 else
2859 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
2860 pf->misc_int_name, pf);
2861
2862 if (err)
2863 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
2864
2865 return err;
2866}
2867
2868#ifdef CONFIG_NET_POLL_CONTROLLER
2869/**
2870 * i40e_netpoll - A Polling 'interrupt' handler
2871 * @netdev: network interface device structure
2872 *
2873 * This is used by netconsole to send skbs without having to re-enable
2874 * interrupts. It's not called while the normal interrupt routine is executing.
2875 **/
2876static void i40e_netpoll(struct net_device *netdev)
2877{
2878 struct i40e_netdev_priv *np = netdev_priv(netdev);
2879 struct i40e_vsi *vsi = np->vsi;
2880 struct i40e_pf *pf = vsi->back;
2881 int i;
2882
2883 /* if interface is down do nothing */
2884 if (test_bit(__I40E_DOWN, &vsi->state))
2885 return;
2886
2887 pf->flags |= I40E_FLAG_IN_NETPOLL;
2888 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2889 for (i = 0; i < vsi->num_q_vectors; i++)
2890 i40e_msix_clean_rings(0, &vsi->q_vectors[i]);
2891 } else {
2892 i40e_intr(pf->pdev->irq, pf);
2893 }
2894 pf->flags &= ~I40E_FLAG_IN_NETPOLL;
2895}
2896#endif
2897
2898/**
2899 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
2900 * @vsi: the VSI being configured
2901 * @enable: start or stop the rings
2902 **/
2903static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
2904{
2905 struct i40e_pf *pf = vsi->back;
2906 struct i40e_hw *hw = &pf->hw;
2907 int i, j, pf_q;
2908 u32 tx_reg;
2909
2910 pf_q = vsi->base_queue;
2911 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
2912 j = 1000;
2913 do {
2914 usleep_range(1000, 2000);
2915 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
2916 } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
2917 ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
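 /* QENA_REQ and QENA_STAT now agree, so any enable/disable
 * request issued earlier has completed */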
2918
2919 if (enable) {
2920 /* is STAT set ? */
2921 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
2922 dev_info(&pf->pdev->dev,
2923 "Tx %d already enabled\n", i);
2924 continue;
2925 }
2926 } else {
2927 /* is !STAT set ? */
2928 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
2929 dev_info(&pf->pdev->dev,
2930 "Tx %d already disabled\n", i);
2931 continue;
2932 }
2933 }
2934
2935 /* turn on/off the queue */
2936 if (enable)
2937 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2938 I40E_QTX_ENA_QENA_STAT_MASK;
2939 else
2940 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2941
2942 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
2943
2944 /* wait for the change to finish */
2945 for (j = 0; j < 10; j++) {
2946 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
2947 if (enable) {
2948 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
2949 break;
2950 } else {
2951 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
2952 break;
2953 }
2954
2955 udelay(10);
2956 }
2957 if (j >= 10) {
2958 dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
2959 pf_q, (enable ? "en" : "dis"));
2960 return -ETIMEDOUT;
2961 }
2962 }
2963
2964 return 0;
2965}
2966
2967/**
2968 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
2969 * @vsi: the VSI being configured
2970 * @enable: start or stop the rings
2971 **/
2972static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
2973{
2974 struct i40e_pf *pf = vsi->back;
2975 struct i40e_hw *hw = &pf->hw;
2976 int i, j, pf_q;
2977 u32 rx_reg;
2978
2979 pf_q = vsi->base_queue;
2980 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
2981 j = 1000;
2982 do {
2983 usleep_range(1000, 2000);
2984 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
2985 } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
2986 ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);
2987
2988 if (enable) {
2989 /* is STAT set ? */
2990 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
2991 continue;
2992 } else {
2993 /* is !STAT set ? */
2994 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
2995 continue;
2996 }
2997
2998 /* turn on/off the queue */
2999 if (enable)
3000 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3001 I40E_QRX_ENA_QENA_STAT_MASK;
3002 else
3003 rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
3004 I40E_QRX_ENA_QENA_STAT_MASK);
3005 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3006
3007 /* wait for the change to finish */
3008 for (j = 0; j < 10; j++) {
3009 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3010
3011 if (enable) {
3012 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3013 break;
3014 } else {
3015 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3016 break;
3017 }
3018
3019 udelay(10);
3020 }
3021 if (j >= 10) {
3022 dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
3023 pf_q, (enable ? "en" : "dis"));
3024 return -ETIMEDOUT;
3025 }
3026 }
3027
3028 return 0;
3029}
3030
3031/**
3032 * i40e_vsi_control_rings - Start or stop a VSI's rings
3033 * @vsi: the VSI being configured
3034 * @request: true to start the rings, false to stop them
3035 **/
3036static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3037{
3038 int ret;
3039
3040 /* do rx first for enable and last for disable */
3041 if (request) {
3042 ret = i40e_vsi_control_rx(vsi, request);
3043 if (ret)
3044 return ret;
3045 ret = i40e_vsi_control_tx(vsi, request);
3046 } else {
3047 ret = i40e_vsi_control_tx(vsi, request);
3048 if (ret)
3049 return ret;
3050 ret = i40e_vsi_control_rx(vsi, request);
3051 }
3052
3053 return ret;
3054}
3055
3056/**
3057 * i40e_vsi_free_irq - Free the irq association with the OS
3058 * @vsi: the VSI being configured
3059 **/
3060static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3061{
3062 struct i40e_pf *pf = vsi->back;
3063 struct i40e_hw *hw = &pf->hw;
3064 int base = vsi->base_vector;
3065 u32 val, qp;
3066 int i;
3067
3068 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3069 if (!vsi->q_vectors)
3070 return;
3071
3072 for (i = 0; i < vsi->num_q_vectors; i++) {
3073 u16 vector = i + base;
3074
3075 /* free only the irqs that were actually requested */
3076 if (vsi->q_vectors[i].num_ringpairs == 0)
3077 continue;
3078
3079 /* clear the affinity_mask in the IRQ descriptor */
3080 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3081 NULL);
3082 free_irq(pf->msix_entries[vector].vector,
3083 &vsi->q_vectors[i]);
3084
3085 /* Tear down the interrupt queue link list
3086 *
3087 * We know that they come in pairs and always
3088 * the Rx first, then the Tx. To clear the
3089 * link list, stick the EOL value into the
3090 * next_q field of the registers.
3091 */
3092 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3093 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3094 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3095 val |= I40E_QUEUE_END_OF_LIST
3096 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3097 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3098
3099 while (qp != I40E_QUEUE_END_OF_LIST) {
3100 u32 next;
3101
3102 val = rd32(hw, I40E_QINT_RQCTL(qp));
3103
3104 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3105 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3106 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3107 I40E_QINT_RQCTL_INTEVENT_MASK);
3108
3109 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3110 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3111
3112 wr32(hw, I40E_QINT_RQCTL(qp), val);
3113
3114 val = rd32(hw, I40E_QINT_TQCTL(qp));
3115
3116 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3117 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3118
3119 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3120 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3121 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3122 I40E_QINT_TQCTL_INTEVENT_MASK);
3123
3124 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3125 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3126
3127 wr32(hw, I40E_QINT_TQCTL(qp), val);
3128 qp = next;
3129 }
3130 }
3131 } else {
3132 free_irq(pf->pdev->irq, pf);
3133
3134 val = rd32(hw, I40E_PFINT_LNKLST0);
3135 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3136 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3137 val |= I40E_QUEUE_END_OF_LIST
3138 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3139 wr32(hw, I40E_PFINT_LNKLST0, val);
3140
3141 val = rd32(hw, I40E_QINT_RQCTL(qp));
3142 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3143 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3144 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3145 I40E_QINT_RQCTL_INTEVENT_MASK);
3146
3147 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3148 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3149
3150 wr32(hw, I40E_QINT_RQCTL(qp), val);
3151
3152 val = rd32(hw, I40E_QINT_TQCTL(qp));
3153
3154 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3155 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3156 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3157 I40E_QINT_TQCTL_INTEVENT_MASK);
3158
3159 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3160 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3161
3162 wr32(hw, I40E_QINT_TQCTL(qp), val);
3163 }
3164}
3165
3166/**
3167 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3168 * @vsi: the VSI being un-configured
3169 *
3170 * This frees the memory allocated to the q_vectors and
3171 * deletes references to the NAPI struct.
3172 **/
3173static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3174{
3175 int v_idx;
3176
3177 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
3178 struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx];
3179 int r_idx;
3180
3181 if (!q_vector)
3182 continue;
3183
3184 /* disassociate q_vector from rings */
3185 for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
3186 q_vector->tx.ring[r_idx]->q_vector = NULL;
3187 for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
3188 q_vector->rx.ring[r_idx]->q_vector = NULL;
3189
3190 /* only VSI w/ an associated netdev is set up w/ NAPI */
3191 if (vsi->netdev)
3192 netif_napi_del(&q_vector->napi);
3193 }
3194 kfree(vsi->q_vectors);
3195}
3196
3197/**
3198 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3199 * @pf: board private structure
3200 **/
3201static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3202{
3203 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3204 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3205 pci_disable_msix(pf->pdev);
3206 kfree(pf->msix_entries);
3207 pf->msix_entries = NULL;
3208 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3209 pci_disable_msi(pf->pdev);
3210 }
3211 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3212}
3213
3214/**
3215 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3216 * @pf: board private structure
3217 *
3218 * We go through and clear interrupt specific resources and reset the structure
3219 * to pre-load conditions
3220 **/
3221static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3222{
3223 int i;
3224
3225 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3226 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
3227 if (pf->vsi[i])
3228 i40e_vsi_free_q_vectors(pf->vsi[i]);
3229 i40e_reset_interrupt_capability(pf);
3230}
3231
3232/**
3233 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3234 * @vsi: the VSI being configured
3235 **/
3236static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3237{
3238 int q_idx;
3239
3240 if (!vsi->netdev)
3241 return;
3242
3243 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3244 napi_enable(&vsi->q_vectors[q_idx].napi);
3245}
3246
3247/**
3248 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3249 * @vsi: the VSI being configured
3250 **/
3251static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3252{
3253 int q_idx;
3254
3255 if (!vsi->netdev)
3256 return;
3257
3258 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3259 napi_disable(&vsi->q_vectors[q_idx].napi);
3260}
3261
3262/**
3263 * i40e_quiesce_vsi - Pause a given VSI
3264 * @vsi: the VSI being paused
3265 **/
3266static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3267{
3268 if (test_bit(__I40E_DOWN, &vsi->state))
3269 return;
3270
3271 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3272 if (vsi->netdev && netif_running(vsi->netdev)) {
3273 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3274 } else {
3275 set_bit(__I40E_DOWN, &vsi->state);
3276 i40e_down(vsi);
3277 }
3278}
3279
3280/**
3281 * i40e_unquiesce_vsi - Resume a given VSI
3282 * @vsi: the VSI being resumed
3283 **/
3284static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3285{
3286 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3287 return;
3288
3289 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3290 if (vsi->netdev && netif_running(vsi->netdev))
3291 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3292 else
3293 i40e_up(vsi); /* this clears the DOWN bit */
3294}
3295
3296/**
3297 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
3298 * @pf: the PF
3299 **/
3300static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3301{
3302 int v;
3303
3304 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3305 if (pf->vsi[v])
3306 i40e_quiesce_vsi(pf->vsi[v]);
3307 }
3308}
3309
3310/**
3311 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
3312 * @pf: the PF
3313 **/
3314static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3315{
3316 int v;
3317
3318 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3319 if (pf->vsi[v])
3320 i40e_unquiesce_vsi(pf->vsi[v]);
3321 }
3322}
3323
3324/**
3325 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
3326 * @dcbcfg: the corresponding DCBx configuration structure
3327 *
3328 * Return the number of TCs from given DCBx configuration
3329 **/
3330static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3331{
3332 u8 num_tc = 0;
3333 int i;
3334
3335 /* Scan the ETS Config Priority Table to find
3336 * traffic class enabled for a given priority
3337 * and use the traffic class index to get the
3338 * number of traffic classes enabled
3339 */
3340 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3341 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
3342 num_tc = dcbcfg->etscfg.prioritytable[i];
3343 }
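 /* e.g. a prioritytable of {0, 0, 1, 1, 2, 0, 0, 0} has a highest
 * TC index of 2, so three traffic classes are reported */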
3344
3345 /* Traffic class index starts from zero so
3346 * increment to return the actual count
3347 */
3348 return num_tc + 1;
3349 }
3350
3351/**
3352 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
3353 * @dcbcfg: the corresponding DCBx configuration structure
3354 *
3355 * Query the current DCB configuration and return the number of
3356 * traffic classes enabled from the given DCBX config
3357 **/
3358static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
3359{
3360 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
3361 u8 enabled_tc = 1;
3362 u8 i;
3363
3364 for (i = 0; i < num_tc; i++)
3365 enabled_tc |= 1 << i;
3366
3367 return enabled_tc;
3368}
3369
3370/**
3371 * i40e_pf_get_num_tc - Get the number of enabled traffic classes for PF
3372 * @pf: PF being queried
3373 *
3374 * Return number of traffic classes enabled for the given PF
3375 **/
3376static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
3377{
3378 struct i40e_hw *hw = &pf->hw;
3379 u8 i, enabled_tc;
3380 u8 num_tc = 0;
3381 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3382
3383 /* If DCB is not enabled then always in single TC */
3384 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3385 return 1;
3386
3387 /* MFP mode return count of enabled TCs for this PF */
3388 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3389 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3390 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3391 if (enabled_tc & (1 << i))
3392 num_tc++;
3393 }
3394 return num_tc;
3395 }
3396
3397 /* SFP mode will be enabled for all TCs on port */
3398 return i40e_dcb_get_num_tc(dcbcfg);
3399}
3400
3401/**
3402 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
3403 * @pf: PF being queried
3404 *
3405 * Return a bitmap for first enabled traffic class for this PF.
3406 **/
3407static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
3408{
3409 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3410 u8 i = 0;
3411
3412 if (!enabled_tc)
3413 return 0x1; /* TC0 */
3414
3415 /* Find the first enabled TC */
3416 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3417 if (enabled_tc & (1 << i))
3418 break;
3419 }
3420
3421 return 1 << i;
3422}
3423
3424/**
3425 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
3426 * @pf: PF being queried
3427 *
3428 * Return a bitmap for enabled traffic classes for this PF.
3429 **/
3430static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
3431{
3432 /* If DCB is not enabled for this PF then just return default TC */
3433 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3434 return i40e_pf_get_default_tc(pf);
3435
3436 /* MFP mode will have enabled TCs set by FW */
3437 if (pf->flags & I40E_FLAG_MFP_ENABLED)
3438 return pf->hw.func_caps.enabled_tcmap;
3439
3440 /* SFP mode we want PF to be enabled for all TCs */
3441 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
3442}
3443
3444/**
3445 * i40e_vsi_get_bw_info - Query VSI BW Information
3446 * @vsi: the VSI being queried
3447 *
3448 * Returns 0 on success, negative value on failure
3449 **/
3450static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3451{
3452 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
3453 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
3454 struct i40e_pf *pf = vsi->back;
3455 struct i40e_hw *hw = &pf->hw;
3456 i40e_status aq_ret;
3457 u32 tc_bw_max;
3458 int i;
3459
3460 /* Get the VSI level BW configuration */
3461 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
3462 if (aq_ret) {
3463 dev_info(&pf->pdev->dev,
3464 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
3465 aq_ret, pf->hw.aq.asq_last_status);
3466 return -EINVAL;
3467 }
3468
3469 /* Get the VSI level BW configuration per TC */
3470 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
3471 NULL);
3472 if (aq_ret) {
3473 dev_info(&pf->pdev->dev,
3474 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
3475 aq_ret, pf->hw.aq.asq_last_status);
3476 return -EINVAL;
3477 }
3478
3479 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
3480 dev_info(&pf->pdev->dev,
3481 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
3482 bw_config.tc_valid_bits,
3483 bw_ets_config.tc_valid_bits);
3484 /* Still continuing */
3485 }
3486
3487 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
3488 vsi->bw_max_quanta = bw_config.max_bw;
3489 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
3490 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
3491 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3492 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
3493 vsi->bw_ets_limit_credits[i] =
3494 le16_to_cpu(bw_ets_config.credits[i]);
3495 /* 3 bits out of 4 for each TC */
3496 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
3497 }
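 /* e.g. a tc_bw_max of 0x4321 yields bw_ets_max_quanta of 1, 2, 3
 * and 4 for TC0 through TC3 */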
3498
3499 return 0;
3500 }
3501
3502/**
3503 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
3504 * @vsi: the VSI being configured
3505 * @enabled_tc: TC bitmap
3506 * @bw_credits: BW shared credits per TC
3507 *
3508 * Returns 0 on success, negative value on failure
3509 **/
3510 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
3511 u8 *bw_share)
3512 {
3513 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
3514 i40e_status aq_ret;
3515 int i;
3516
3517 bw_data.tc_valid_bits = enabled_tc;
3518 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3519 bw_data.tc_bw_credits[i] = bw_share[i];
3520
3521 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
3522 NULL);
3523 if (aq_ret) {
3524 dev_info(&vsi->back->pdev->dev,
3525 "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
3526 __func__, vsi->back->hw.aq.asq_last_status);
3527 return -EINVAL;
3528 }
3529
3530 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3531 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
3532
3533 return 0;
3534 }

/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 *
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (!netdev)
		return;

	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x09 (binary 1001); the
		 * driver will set the numtc for netdev as 2, and these
		 * will be referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & (1 << i))
			netdev_set_tc_queue(netdev,
					vsi->tc_config.tc_info[i].netdev_tc,
					vsi->tc_config.tc_info[i].qcount,
					vsi->tc_config.tc_info[i].qoffset);
	}

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}
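
/* Illustrative sketch (not part of the driver): one trip through the
 * UP2TC loop above. Suppose the DCBX priority table maps user
 * priority 5 to hardware TC3, and TC3 was registered with the stack
 * as netdev TC 1 (it is the second enabled TC):
 *
 *	ets_tc    = dcbcfg->etscfg.prioritytable[5];		= 3
 *	netdev_tc = vsi->tc_config.tc_info[3].netdev_tc;	= 1
 *	netdev_set_prio_tc_map(netdev, 5, 1);
 *
 * Traffic tagged with priority 5 is then steered to the queue range
 * that netdev TC 1 owns.
 */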

/**
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}

/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vsi failed, aq_err=%d\n",
			 vsi->back->hw.aq.asq_last_status);
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, aq_err=%d\n",
			 vsi->back->hw.aq.asq_last_status);
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}

/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_control_rings(vsi, true);
	if (err)
		return err;

	clear_bit(__I40E_DOWN, &vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	}
	i40e_service_event_schedule(pf);

	return 0;
}

/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	WARN_ON(in_interrupt());
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	/* Give a VF some time to respond to the reset. The
	 * two second wait is based upon the watchdog cycle in
	 * the VF driver.
	 */
	if (vsi->type == I40E_VSI_SRIOV)
		msleep(2000);
	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
}

/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err;

	err = i40e_vsi_configure(vsi);
	if (!err)
		err = i40e_up_complete(vsi);

	return err;
}

/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 **/
void i40e_down(struct i40e_vsi *vsi)
{
	int i;

	/* It is assumed that the caller of this function
	 * sets the vsi->state __I40E_DOWN bit.
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}
	i40e_vsi_disable_irq(vsi);
	i40e_vsi_control_rings(vsi, false);
	i40e_napi_disable_all(vsi);

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		i40e_clean_tx_ring(&vsi->tx_rings[i]);
		i40e_clean_rx_ring(&vsi->rx_rings[i]);
	}
}

/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 **/
static int i40e_setup_tc(struct net_device *netdev, u8 tc)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc = 0;
	int ret = -EINVAL;
	int i;

	/* Check if DCB enabled to continue */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
		netdev_info(netdev, "DCB is not enabled for adapter\n");
		goto exit;
	}

	/* Check if MFP enabled */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
		goto exit;
	}

	/* Check whether tc count is within enabled limit */
	if (tc > i40e_pf_get_num_tc(pf)) {
		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
		goto exit;
	}

	/* Generate TC map for number of tc requested */
	for (i = 0; i < tc; i++)
		enabled_tc |= (1 << i);

	/* Requesting same TC configuration as already enabled */
	if (enabled_tc == vsi->tc_config.enabled_tc)
		return 0;

	/* Quiesce VSI queues */
	i40e_quiesce_vsi(vsi);

	/* Configure VSI for enabled TCs */
	ret = i40e_vsi_config_tc(vsi, enabled_tc);
	if (ret) {
		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
			    vsi->seid);
		goto exit;
	}

	/* Unquiesce VSI */
	i40e_unquiesce_vsi(vsi);

exit:
	return ret;
}

/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	char int_name[IFNAMSIZ];
	int err;

	/* disallow open during test */
	if (test_bit(__I40E_TESTING, &pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
		 dev_driver_string(&pf->pdev->dev), netdev->name);
	err = i40e_vsi_request_irq(vsi, int_name);
	if (err)
		goto err_setup_rx;

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
		err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
		if (err)
			netdev_info(netdev,
				    "couldn't set broadcast err %d aq_err %d\n",
				    err, pf->hw.aq.asq_last_status);
	}

	return 0;

err_up_complete:
	i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));

	return err;
}

/**
 * i40e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * this netdev interface is disabled.
 *
 * Returns 0, this is not allowed to fail
 **/
static int i40e_close(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (test_and_set_bit(__I40E_DOWN, &vsi->state))
		return 0;

	i40e_down(vsi);
	i40e_vsi_free_irq(vsi);

	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);

	return 0;
}

/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
{
	u32 val;

	WARN_ON(in_interrupt());

	/* do the biggest reset indicated */
	if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset
		 *
		 * This will start the chip's countdown to the actual full
		 * chip reset event, and a warning interrupt to be sent
		 * to all PFs, including the requestor. Our handler
		 * for the warning interrupt will deal with the shutdown
		 * and recovery of the switch setup.
		 */
		dev_info(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset
		 *
		 * Same as Global Reset, except does *not* include the MAC/PHY
		 */
		dev_info(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {

		/* Request a PF Reset
		 *
		 * Resets only the PF-specific registers
		 *
		 * This goes directly to the tear-down and rebuild of
		 * the switch, since we need to do all the recovery as
		 * for the Core Reset.
		 */
		dev_info(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf);

	} else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev, "VSI reinit requested\n");
		for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];
			if (vsi != NULL &&
			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
				i40e_vsi_reinit_locked(pf->vsi[v]);
				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
			}
		}

		/* no further action needed, so return now */
		return;
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
		return;
	}
}
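
/* Illustrative sketch (not part of the driver): callers request a
 * reset by passing a bit built from one of the __I40E_*_REQUESTED
 * state enums, exactly as i40e_open() does on its error path:
 *
 *	i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
 *
 * Only the most severe reset indicated in reset_flags is performed:
 * a GlobalR bit wins over CoreR, which wins over PFR.
 */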

/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
		 __func__, queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}
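
/* Illustrative sketch (not part of the driver): the mask-and-shift
 * pattern applied to qtx_ctl above is how register fields are decoded
 * throughout this file. For a hypothetical 2-bit field with
 * MASK = 0x0000000C and SHIFT = 2, a register value of 0x0000000B
 * decodes as:
 *
 *	(0x0B & 0x0C) >> 2 = 0x08 >> 2 = 0x2
 *
 * The mask isolates the field's bits; the shift right-justifies them.
 */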

/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_clear_bit();
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}

/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 **/
static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
	if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
		return;

	pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &pf->state))
		return;
}

/**
 * i40e_vsi_link_event - notify VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		if (!vsi->netdev || !vsi->netdev_registered)
			break;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
		break;

	case I40E_VSI_SRIOV:
		break;

	case I40E_VSI_VMDQ2:
	case I40E_VSI_CTRL:
	case I40E_VSI_MIRROR:
	default:
		/* there is no notification for other VSIs */
		break;
	}
}

/**
 * i40e_veb_link_event - notify elements on the veb of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
	struct i40e_pf *pf;
	int i;

	if (!veb || !veb->pf)
		return;
	pf = veb->pf;

	/* depth first... */
	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
			i40e_veb_link_event(pf->veb[i], link_up);

	/* ... now the local VSIs */
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
			i40e_vsi_link_event(pf->vsi[i], link_up);
}

/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
	bool new_link, old_link;

	new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);

	if (new_link == old_link)
		return;

	netdev_info(pf->vsi[pf->lan_vsi]->netdev,
		    "NIC Link is %s\n", (new_link ? "Up" : "Down"));

	/* Notify the base of the switch tree connected to
	 * the link. Floating VEBs are not notified.
	 */
	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);

	if (pf->vf)
		i40e_vc_notify_link_state(pf);
}

/**
 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
 * @pf: board private structure
 *
 * Set the per-queue flags to request a check for stuck queues in the irq
 * clean functions, then force interrupts to be sure the irq clean is called.
 **/
static void i40e_check_hang_subtask(struct i40e_pf *pf)
{
	int i, v;

	/* If we're down or resetting, just bail */
	if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	/* for each VSI/netdev
	 *     for each Tx queue
	 *         set the check flag
	 *     for each q_vector
	 *         force an interrupt
	 */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];
		int armed = 0;

		if (!pf->vsi[v] ||
		    test_bit(__I40E_DOWN, &vsi->state) ||
		    (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			set_check_for_tx_hang(&vsi->tx_rings[i]);
			if (test_bit(__I40E_HANG_CHECK_ARMED,
				     &vsi->tx_rings[i].state))
				armed++;
		}

		if (armed) {
			if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
				wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
				     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
				      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
			} else {
				u16 vec = vsi->base_vector - 1;
				u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
					   I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
				for (i = 0; i < vsi->num_q_vectors; i++, vec++)
					wr32(&vsi->back->hw,
					     I40E_PFINT_DYN_CTLN(vec), val);
			}
			i40e_flush(&vsi->back->hw);
		}
	}
}

/**
 * i40e_watchdog_subtask - Check and bring link up
 * @pf: board private structure
 **/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &pf->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			i40e_update_stats(pf->vsi[i]);

	/* Update the stats for the active switching components */
	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i])
			i40e_update_veb_stats(pf->veb[i]);
}

/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
	u32 reset_flags = 0;

	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_REINIT_REQUESTED);
		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
		i40e_handle_reset_warning(pf);
		return;
	}

	/* Kick off the requested reset, unless we're already
	 * down or resetting, in which case just bail.
	 */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))
		i40e_do_reset(pf, reset_flags);
}

/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static void i40e_handle_link_event(struct i40e_pf *pf,
				   struct i40e_arq_event_info *e)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_aqc_get_link_status *status =
		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;

	/* save off old link status information */
	memcpy(&pf->hw.phy.link_info_old, hw_link_info,
	       sizeof(pf->hw.phy.link_info_old));

	/* update link status */
	hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
	hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
	hw_link_info->link_info = status->link_info;
	hw_link_info->an_info = status->an_info;
	hw_link_info->ext_info = status->ext_info;
	hw_link_info->lse_enable =
		le16_to_cpu(status->command_flags) &
			    I40E_AQ_LSE_ENABLE;

	/* process the event */
	i40e_link_event(pf);

	/* Do a new status request to re-enable LSE reporting
	 * and load new status information into the hw struct,
	 * then see if the status changed while processing the
	 * initial event.
	 */
	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
	i40e_link_event(pf);
}

/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 val;

	if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
		return;

	event.msg_size = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
			dev_info(&pf->pdev->dev, "No ARQ event found\n");
			break;
		} else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_size);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ Error: Unknown event %d received\n",
				 opcode);
			break;
		}
	} while (pending && (i++ < pf->adminq_work_limit));

	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);
	i40e_flush(hw);

	kfree(event.msg_buf);
}

/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses into the next layer of VEBs. We track the connections
 * through our own index numbers because the SEIDs from the HW could
 * change across the reset.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of owner VSI failed: %d\n", ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];
			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}

/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 **/
static int i40e_get_capabilities(struct i40e_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
					    &data_size,
					    i40e_aqc_opc_list_func_capabilities,
					    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed: aq=%d\n",
				 pf->hw.aq.asq_last_status);
			return -ENODEV;
		}
	} while (err);

	if (pf->hw.debug_mask & I40E_DEBUG_USER)
		dev_info(&pf->pdev->dev,
			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
			 pf->hw.func_caps.num_msix_vectors,
			 pf->hw.func_caps.num_msix_vectors_vf,
			 pf->hw.func_caps.fd_filters_guaranteed,
			 pf->hw.func_caps.fd_filters_best_effort,
			 pf->hw.func_caps.num_tx_qp,
			 pf->hw.func_caps.num_vsis);

	return 0;
}

/**
 * i40e_fdir_setup - initialize the Flow Director resources
 * @pf: board private structure
 **/
static void i40e_fdir_setup(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;
	bool new_vsi = false;
	int err, i;

	if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED)))
		return;

	pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;

	/* find existing or make new FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi) {
		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
			pf->flags &= ~I40E_FLAG_FDIR_ENABLED;
			return;
		}
		new_vsi = true;
	}
	WARN_ON(vsi->base_queue != I40E_FDIR_RING);
	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings);

	err = i40e_vsi_setup_tx_resources(vsi);
	if (!err)
		err = i40e_vsi_setup_rx_resources(vsi);
	if (!err)
		err = i40e_vsi_configure(vsi);
	if (!err && new_vsi) {
		char int_name[IFNAMSIZ + 9];
		snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
			 dev_driver_string(&pf->pdev->dev));
		err = i40e_vsi_request_irq(vsi, int_name);
	}
	if (!err)
		err = i40e_up_complete(vsi);

	clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
}

/**
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 **/
static void i40e_fdir_teardown(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_release(pf->vsi[i]);
			break;
		}
	}
}

/**
 * i40e_handle_reset_warning - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
		return;

	dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	i40e_vc_notify_reset(pf);

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_pf_reset(hw);
	if (ret)
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
	pf->pfr_count++;

	if (test_bit(__I40E_DOWN, &pf->state))
		goto end_core_reset;
	dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
		goto end_core_reset;
	}

	ret = i40e_get_capabilities(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
			 ret);
		goto end_core_reset;
	}

	/* call shutdown HMC */
	ret = i40e_shutdown_lan_hmc(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

	/* do basic switch setup */
	ret = i40e_setup_pf_switch(pf);
	if (ret)
		goto end_core_reset;

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
		dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					pf->vsi[pf->lan_vsi]->uplink_seid
								= pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
		dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_core_reset;
		}
	}

	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* tell the firmware that we're starting */
	dv.major_version = DRV_VERSION_MAJOR;
	dv.minor_version = DRV_VERSION_MINOR;
	dv.build_version = DRV_VERSION_BUILD;
	dv.subbuild_version = 0;
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);

	dev_info(&pf->pdev->dev, "PF reset done\n");

end_core_reset:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
}

/**
 * i40e_handle_mdd_event
 * @pf: pointer to the pf structure
 *
 * Called from the MDD irq handler to identify possibly malicious VFs
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
				>> I40E_GL_MDET_TX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK)
				>> I40E_GL_MDET_TX_EVENT_SHIFT;
		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
				>> I40E_GL_MDET_TX_QUEUE_SHIFT;
		dev_info(&pf->pdev->dev,
			 "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
			 event, queue, func);
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
				>> I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK)
				>> I40E_GL_MDET_RX_EVENT_SHIFT;
		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
				>> I40E_GL_MDET_RX_QUEUE_SHIFT;
		dev_info(&pf->pdev->dev,
			 "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
			 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
		}

		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}

/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	i40e_reset_subtask(pf);
	i40e_handle_mdd_event(pf);
	i40e_vc_process_vflr_event(pf);
	i40e_watchdog_subtask(pf);
	i40e_fdir_reinit_subtask(pf);
	i40e_check_hang_subtask(pf);
	i40e_sync_filters_subtask(pf);
	i40e_clean_adminq_subtask(pf);

	i40e_service_event_complete(pf);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		i40e_service_event_schedule(pf);
}

/**
 * i40e_service_timer - timer callback
 * @data: pointer to PF struct
 **/
static void i40e_service_timer(unsigned long data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}
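
/* Illustrative sketch (not part of the driver): how the timer and the
 * service task cooperate. The timer re-arms itself on every tick, so
 * the subtasks run at a steady cadence even with no interrupts:
 *
 *	timer fires -> mod_timer(jiffies + period) -> schedule task
 *	task runs subtasks -> reschedules itself early only if a pass
 *	overran the period or event-pending state bits are still set
 */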

/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		vsi->alloc_queue_pairs = 1;
		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = 1;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

	default:
		WARN_ON(1);
		return -ENODATA;
	}

	return 0;
}

/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening. We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
		i++;
	if (i >= pf->hw.func_caps.num_vsis) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto err_alloc_vsi;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto err_alloc_vsi;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->rx_itr_setting = pf->rx_itr_default;
	vsi->tx_itr_setting = pf->tx_itr_default;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);

	i40e_set_num_rings_in_vsi(vsi);

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
err_alloc_vsi:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
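
/* Illustrative sketch (not part of the driver): the two-pass slot scan
 * above, with num_vsis = 8, next_vsi = 5, and slots 5..7 occupied:
 *
 *	pass 1: i = 5, 6, 7 all busy; i reaches 8 (>= num_vsis)
 *	pass 2: restart at i = 0; slot 0 is free, so vsi_idx = 0
 *
 * pf->next_vsi then becomes 1, so the next allocation starts scanning
 * just past the slot handed out here.
 */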

/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the pf for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}

/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int ret = 0;
	int i;

	vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
				sizeof(struct i40e_ring), GFP_KERNEL);
	if (!vsi->rx_rings) {
		ret = -ENOMEM;
		goto err_alloc_rings;
	}

	vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
				sizeof(struct i40e_ring), GFP_KERNEL);
	if (!vsi->tx_rings) {
		ret = -ENOMEM;
		kfree(vsi->rx_rings);
		goto err_alloc_rings;
	}

	/* Set basic values in the rings to be used later during open() */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
		struct i40e_ring *rx_ring = &vsi->rx_rings[i];
		struct i40e_ring *tx_ring = &vsi->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->reg_idx = vsi->base_queue + i;
		tx_ring->ring_active = false;
		tx_ring->vsi = vsi;
		tx_ring->netdev = vsi->netdev;
		tx_ring->dev = &pf->pdev->dev;
		tx_ring->count = vsi->num_desc;
		tx_ring->size = 0;
		tx_ring->dcb_tc = 0;

		rx_ring->queue_index = i;
		rx_ring->reg_idx = vsi->base_queue + i;
		rx_ring->ring_active = false;
		rx_ring->vsi = vsi;
		rx_ring->netdev = vsi->netdev;
		rx_ring->dev = &pf->pdev->dev;
		rx_ring->count = vsi->num_desc;
		rx_ring->size = 0;
		rx_ring->dcb_tc = 0;
		if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
			set_ring_16byte_desc_enabled(rx_ring);
		else
			clear_ring_16byte_desc_enabled(rx_ring);
	}

err_alloc_rings:
	return ret;
}

/**
 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being cleaned
 **/
static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
	if (vsi) {
		kfree(vsi->rx_rings);
		kfree(vsi->tx_rings);
	}

	return 0;
}

/**
 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
 * @pf: board private structure
 * @vectors: the number of MSI-X vectors to request
 *
 * Returns the number of vectors reserved, or error
 **/
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
	int err = 0;

	pf->num_msix_entries = 0;
	while (vectors >= I40E_MIN_MSIX) {
		err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
		if (err == 0) {
			/* good to go */
			pf->num_msix_entries = vectors;
			break;
		} else if (err < 0) {
			/* total failure */
			dev_info(&pf->pdev->dev,
				 "MSI-X vector reservation failed: %d\n", err);
			vectors = 0;
			break;
		} else {
			/* err > 0 is the hint for retry */
			dev_info(&pf->pdev->dev,
				 "MSI-X vectors wanted %d, retrying with %d\n",
				 vectors, err);
			vectors = err;
		}
	}

	if (vectors > 0 && vectors < I40E_MIN_MSIX) {
		dev_info(&pf->pdev->dev,
			 "Couldn't get enough vectors, only %d available\n",
			 vectors);
		vectors = 0;
	}

	return vectors;
}
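
/* Illustrative sketch (not part of the driver): the retry loop above
 * relies on the old pci_enable_msix() contract, where a positive
 * return value is the number of vectors the system could provide:
 *
 *	err = pci_enable_msix(pdev, entries, 16);	e.g. returns 9
 *	err = pci_enable_msix(pdev, entries, 9);	retry, returns 0
 *
 * Each pass shrinks the request to the advertised limit until the
 * call succeeds (0), hard-fails (< 0), or drops below I40E_MIN_MSIX.
 */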

/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	i40e_status err = 0;
	struct i40e_hw *hw = &pf->hw;
	int v_budget, i;
	int vec;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request is composed of:
	 *   - 1 for the "other" cause, e.g. Admin Queue events
	 *   - the number of LAN queue pairs
	 *       already adjusted for the NUMA node
	 *       assumes symmetric Tx/Rx pairing
	 *   - the number of VMDq pairs
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again. If that still fails, we punt.
	 */
	pf->num_lan_msix = pf->num_lan_qps;
	pf->num_vmdq_msix = pf->num_vmdq_qps;
	v_budget = 1 + pf->num_lan_msix;
	v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
	if (pf->flags & I40E_FLAG_FDIR_ENABLED)
		v_budget++;

	/* Scale down if necessary, and the rings will share vectors */
	v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);

	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	vec = i40e_reserve_msix_vectors(pf, v_budget);
	if (vec < I40E_MIN_MSIX) {
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		return -ENODEV;

	} else if (vec == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_vmdq_msix = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (vec != v_budget) {
		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		vec--;                    /* reserve the misc vector */

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_vmdq_vsis = 1;
			pf->num_lan_msix = 1;
			break;
		case 3:
			pf->num_vmdq_vsis = 1;
			pf->num_lan_msix = 2;
			break;
		default:
			pf->num_lan_msix = min_t(int, (vec / 2),
						 pf->num_lan_qps);
			pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			break;
		}
	}

	return err;
}
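
/* Illustrative sketch (not part of the driver): a sample vector budget
 * for a PF with 8 LAN queue pairs, 4 VMDq VSIs of 2 queue pairs each,
 * and Flow Director enabled:
 *
 *	v_budget = 1 (misc) + 8 (LAN) + 4 * 2 (VMDq) + 1 (FDIR) = 18
 *
 * If the kernel grants only 10 vectors, the fallback above keeps one
 * for misc, forces VMDq VSIs to one vector each, and splits the
 * remaining 9: num_lan_msix = min(9 / 2, 8) = 4 and num_vmdq_vsis =
 * min(9 - 4, I40E_DEFAULT_NUM_VMDQ_VSI).
 */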

/**
 * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int v_idx, num_q_vectors;

	/* if not MSIX, give the one vector only to the LAN VSI */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_q_vectors = vsi->num_q_vectors;
	else if (vsi == pf->vsi[pf->lan_vsi])
		num_q_vectors = 1;
	else
		return -EINVAL;

	vsi->q_vectors = kcalloc(num_q_vectors,
				 sizeof(struct i40e_q_vector),
				 GFP_KERNEL);
	if (!vsi->q_vectors)
		return -ENOMEM;

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		vsi->q_vectors[v_idx].vsi = vsi;
		vsi->q_vectors[v_idx].v_idx = v_idx;
		cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask);
		if (vsi->netdev)
			netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
				       i40e_napi_poll, vsi->work_limit);
	}

	return 0;
}

/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 **/
static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int err = 0;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_init_msix(pf);
		if (err) {
			pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
				       I40E_FLAG_MQ_ENABLED |
				       I40E_FLAG_DCB_ENABLED |
				       I40E_FLAG_SRIOV_ENABLED |
				       I40E_FLAG_FDIR_ENABLED |
				       I40E_FLAG_FDIR_ATR_ENABLED |
				       I40E_FLAG_VMDQ_ENABLED);

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		err = pci_enable_msi(pf->pdev);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSI init failed (%d), trying legacy.\n", err);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
	}

	/* track first vector for misc interrupts */
	err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
}

/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
 * when in MSI or Legacy interrupt mode.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the irq if this is the first time through, and
	 * not when we're rebuilding after a Reset
	 */
	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->misc_int_name, pf);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "request_irq for msix_misc failed: %d\n", err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(hw);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}

/**
 * i40e_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/
static int i40e_config_rss(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 lut = 0;
	int i, j;
	u64 hena;
	/* Set of random keys generated using kernel random number generator */
	static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
				0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
				0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
				0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};

	/* Fill out hash function seed */
	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
		wr32(hw, I40E_PFQF_HKEY(i), seed[i]);

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
		((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {

		/* The assumption is that lan qp count will be the highest
		 * qp count for any PF VSI that needs RSS.
		 * If multiple VSIs need RSS support, all the qp counts
		 * for those VSIs should be a power of 2 for RSS to work.
		 * If LAN VSI is the only consumer for RSS then this requirement
		 * is not necessary.
		 */
		if (j == pf->rss_size)
			j = 0;
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (j &
			 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3)
			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}
	i40e_flush(hw);

	return 0;
}
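
/* Illustrative sketch (not part of the driver): how the LUT loop above
 * packs queue indices. With rss_size = 4, the queue sequence is
 * 0, 1, 2, 3, 0, 1, ... and each group of four 8-bit entries lands in
 * one 32-bit HLUT register:
 *
 *	i = 0..3 packs j = 0, 1, 2, 3  ->  lut = 0x00010203
 *	wr32(hw, I40E_PFQF_HLUT(0), 0x00010203);
 *
 * An incoming packet's hash indexes this table to pick its Rx queue.
 */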
5425
5426/**
5427 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
5428 * @pf: board private structure to initialize
5429 *
5430 * i40e_sw_init initializes the Adapter private data structure.
5431 * Fields are initialized based on PCI device information and
5432 * OS network device settings (MTU size).
5433 **/
5434static int i40e_sw_init(struct i40e_pf *pf)
5435{
5436 int err = 0;
5437 int size;
5438
5439 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
5440 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
5441 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
5442 if (I40E_DEBUG_USER & debug)
5443 pf->hw.debug_mask = debug;
5444 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
5445 I40E_DEFAULT_MSG_ENABLE);
5446 }
5447
5448 /* Set default capability flags */
5449 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
5450 I40E_FLAG_MSI_ENABLED |
5451 I40E_FLAG_MSIX_ENABLED |
5452 I40E_FLAG_RX_PS_ENABLED |
5453 I40E_FLAG_MQ_ENABLED |
5454 I40E_FLAG_RX_1BUF_ENABLED;
5455
5456 pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
5457 if (pf->hw.func_caps.rss) {
5458 pf->flags |= I40E_FLAG_RSS_ENABLED;
5459 pf->rss_size = min_t(int, pf->rss_size_max,
5460 nr_cpus_node(numa_node_id()));
5461 } else {
5462 pf->rss_size = 1;
5463 }
5464
5465 if (pf->hw.func_caps.dcb)
5466 pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
5467 else
5468 pf->num_tc_qps = 0;
5469
5470 if (pf->hw.func_caps.fd) {
5471 /* FW/NVM is not yet fixed in this regard */
5472 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
5473 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
5474 pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED;
5475 dev_info(&pf->pdev->dev,
5476 "Flow Director ATR mode Enabled\n");
5477 pf->flags |= I40E_FLAG_FDIR_ENABLED;
5478 dev_info(&pf->pdev->dev,
5479 "Flow Director Side Band mode Enabled\n");
5480 pf->fdir_pf_filter_count =
5481 pf->hw.func_caps.fd_filters_guaranteed;
5482 }
5483 } else {
5484 pf->fdir_pf_filter_count = 0;
5485 }
5486
5487 if (pf->hw.func_caps.vmdq) {
5488 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
5489 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
5490 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
5491 }
5492
5493 /* MFP mode enabled */
5494 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
5495 pf->flags |= I40E_FLAG_MFP_ENABLED;
5496 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
5497 }
5498
5499#ifdef CONFIG_PCI_IOV
5500 if (pf->hw.func_caps.num_vfs) {
5501 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
5502 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
5503 pf->num_req_vfs = min_t(int,
5504 pf->hw.func_caps.num_vfs,
5505 I40E_MAX_VF_COUNT);
5506 }
5507#endif /* CONFIG_PCI_IOV */
5508 pf->eeprom_version = 0xDEAD;
5509 pf->lan_veb = I40E_NO_VEB;
5510 pf->lan_vsi = I40E_NO_VSI;
5511
5512 /* set up queue assignment tracking */
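	/* (the lump tracker is a flexible array with one u16 slot per
	 *  queue pair; i40e_get_lump() later carves contiguous runs out of
	 *  it, presumably tagged with the requesting VSI's index)
	 */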
5513 size = sizeof(struct i40e_lump_tracking)
5514 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
5515 pf->qp_pile = kzalloc(size, GFP_KERNEL);
5516 if (!pf->qp_pile) {
5517 err = -ENOMEM;
5518 goto sw_init_done;
5519 }
5520 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
5521 pf->qp_pile->search_hint = 0;
5522
5523 /* set up vector assignment tracking */
5524 size = sizeof(struct i40e_lump_tracking)
5525 + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
5526 pf->irq_pile = kzalloc(size, GFP_KERNEL);
5527 if (!pf->irq_pile) {
5528 kfree(pf->qp_pile);
5529 err = -ENOMEM;
5530 goto sw_init_done;
5531 }
5532 pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
5533 pf->irq_pile->search_hint = 0;
5534
5535 mutex_init(&pf->switch_mutex);
5536
5537sw_init_done:
5538 return err;
5539}
5540
5541/**
5542 * i40e_set_features - set the netdev feature flags
5543 * @netdev: ptr to the netdev being adjusted
5544 * @features: the feature set that the stack is suggesting
5545 **/
5546static int i40e_set_features(struct net_device *netdev,
5547 netdev_features_t features)
5548{
5549 struct i40e_netdev_priv *np = netdev_priv(netdev);
5550 struct i40e_vsi *vsi = np->vsi;
5551
5552 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5553 i40e_vlan_stripping_enable(vsi);
5554 else
5555 i40e_vlan_stripping_disable(vsi);
5556
5557 return 0;
5558}
5559
5560static const struct net_device_ops i40e_netdev_ops = {
5561 .ndo_open = i40e_open,
5562 .ndo_stop = i40e_close,
5563 .ndo_start_xmit = i40e_lan_xmit_frame,
5564 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
5565 .ndo_set_rx_mode = i40e_set_rx_mode,
5566 .ndo_validate_addr = eth_validate_addr,
5567 .ndo_set_mac_address = i40e_set_mac,
5568 .ndo_change_mtu = i40e_change_mtu,
5569 .ndo_tx_timeout = i40e_tx_timeout,
5570 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
5571 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
5572#ifdef CONFIG_NET_POLL_CONTROLLER
5573 .ndo_poll_controller = i40e_netpoll,
5574#endif
5575 .ndo_setup_tc = i40e_setup_tc,
5576 .ndo_set_features = i40e_set_features,
5577 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
5578 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
5579 .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
5580 .ndo_get_vf_config = i40e_ndo_get_vf_config,
5581};
5582
5583/**
5584 * i40e_config_netdev - Setup the netdev flags
5585 * @vsi: the VSI being configured
5586 *
5587 * Returns 0 on success, negative value on failure
5588 **/
5589static int i40e_config_netdev(struct i40e_vsi *vsi)
5590{
5591 struct i40e_pf *pf = vsi->back;
5592 struct i40e_hw *hw = &pf->hw;
5593 struct i40e_netdev_priv *np;
5594 struct net_device *netdev;
5595 u8 mac_addr[ETH_ALEN];
5596 int etherdev_size;
5597
5598 etherdev_size = sizeof(struct i40e_netdev_priv);
5599 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
5600 if (!netdev)
5601 return -ENOMEM;
5602
5603 vsi->netdev = netdev;
5604 np = netdev_priv(netdev);
5605 np->vsi = vsi;
5606
5607 netdev->hw_enc_features = NETIF_F_IP_CSUM |
5608 NETIF_F_GSO_UDP_TUNNEL |
5609 NETIF_F_TSO |
5610 NETIF_F_SG;
5611
5612 netdev->features = NETIF_F_SG |
5613 NETIF_F_IP_CSUM |
5614 NETIF_F_SCTP_CSUM |
5615 NETIF_F_HIGHDMA |
5616 NETIF_F_GSO_UDP_TUNNEL |
5617 NETIF_F_HW_VLAN_CTAG_TX |
5618 NETIF_F_HW_VLAN_CTAG_RX |
5619 NETIF_F_HW_VLAN_CTAG_FILTER |
5620 NETIF_F_IPV6_CSUM |
5621 NETIF_F_TSO |
5622 NETIF_F_TSO6 |
5623 NETIF_F_RXCSUM |
5624 NETIF_F_RXHASH |
5625 0;
5626
5627 /* copy netdev features into list of user selectable features */
5628 netdev->hw_features |= netdev->features;
5629
5630 if (vsi->type == I40E_VSI_MAIN) {
5631 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
5632 memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
5633 } else {
5634 /* relate the VSI_VMDQ name to the VSI_MAIN name */
5635 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
5636 pf->vsi[pf->lan_vsi]->netdev->name);
5637 random_ether_addr(mac_addr);
5638 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
5639 }
5640
5641 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
5642 memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
5643 /* vlan gets same features (except vlan offload)
5644 * after any tweaks for specific VSI types
5645 */
5646 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
5647 NETIF_F_HW_VLAN_CTAG_RX |
5648 NETIF_F_HW_VLAN_CTAG_FILTER);
5649 netdev->priv_flags |= IFF_UNICAST_FLT;
5650 netdev->priv_flags |= IFF_SUPP_NOFCS;
5651 /* Setup netdev TC information */
5652 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
5653
5654 netdev->netdev_ops = &i40e_netdev_ops;
5655 netdev->watchdog_timeo = 5 * HZ;
5656 i40e_set_ethtool_ops(netdev);
5657
5658 return 0;
5659}
5660
5661/**
5662 * i40e_vsi_delete - Delete a VSI from the switch
5663 * @vsi: the VSI being removed
5666 **/
5667static void i40e_vsi_delete(struct i40e_vsi *vsi)
5668{
	/* removing the default VSI is not allowed */
5670 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
5671 return;
5672
5673 /* there is no HW VSI for FDIR */
5674 if (vsi->type == I40E_VSI_FDIR)
5675 return;
5676
5677 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
5679}
5680
5681/**
5682 * i40e_add_vsi - Add a VSI to the switch
5683 * @vsi: the VSI being configured
5684 *
5685 * This initializes a VSI context depending on the VSI type to be added and
5686 * passes it down to the add_vsi aq command.
5687 **/
5688static int i40e_add_vsi(struct i40e_vsi *vsi)
5689{
5690 int ret = -ENODEV;
5691 struct i40e_mac_filter *f, *ftmp;
5692 struct i40e_pf *pf = vsi->back;
5693 struct i40e_hw *hw = &pf->hw;
5694 struct i40e_vsi_context ctxt;
5695 u8 enabled_tc = 0x1; /* TC0 enabled */
5696 int f_count = 0;
5697
5698 memset(&ctxt, 0, sizeof(ctxt));
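	/* each case below fills out a VSI context for its type; all but
	 * the MAIN and FDIR cases then hand the context to the firmware
	 * via the add_vsi admin queue command after this switch
	 */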
5699 switch (vsi->type) {
5700 case I40E_VSI_MAIN:
5701 /* The PF's main VSI is already setup as part of the
5702 * device initialization, so we'll not bother with
5703 * the add_vsi call, but we will retrieve the current
5704 * VSI context.
5705 */
5706 ctxt.seid = pf->main_vsi_seid;
5707 ctxt.pf_num = pf->hw.pf_id;
5708 ctxt.vf_num = 0;
5709 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
5710 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5711 if (ret) {
5712 dev_info(&pf->pdev->dev,
5713 "couldn't get pf vsi config, err %d, aq_err %d\n",
5714 ret, pf->hw.aq.asq_last_status);
5715 return -ENOENT;
5716 }
5717 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5718 vsi->info.valid_sections = 0;
5719
5720 vsi->seid = ctxt.seid;
5721 vsi->id = ctxt.vsi_number;
5722
5723 enabled_tc = i40e_pf_get_tc_map(pf);
5724
5725 /* MFP mode setup queue map and update VSI */
5726 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5727 memset(&ctxt, 0, sizeof(ctxt));
5728 ctxt.seid = pf->main_vsi_seid;
5729 ctxt.pf_num = pf->hw.pf_id;
5730 ctxt.vf_num = 0;
5731 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5732 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5733 if (ret) {
5734 dev_info(&pf->pdev->dev,
5735 "update vsi failed, aq_err=%d\n",
5736 pf->hw.aq.asq_last_status);
5737 ret = -ENOENT;
5738 goto err;
5739 }
5740 /* update the local VSI info queue map */
5741 i40e_vsi_update_queue_map(vsi, &ctxt);
5742 vsi->info.valid_sections = 0;
5743 } else {
5744 /* Default/Main VSI is only enabled for TC0
5745 * reconfigure it to enable all TCs that are
5746 * available on the port in SFP mode.
5747 */
5748 ret = i40e_vsi_config_tc(vsi, enabled_tc);
5749 if (ret) {
5750 dev_info(&pf->pdev->dev,
5751 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
5752 enabled_tc, ret,
5753 pf->hw.aq.asq_last_status);
5754 ret = -ENOENT;
5755 }
5756 }
5757 break;
5758
5759 case I40E_VSI_FDIR:
5760 /* no queue mapping or actual HW VSI needed */
5761 vsi->info.valid_sections = 0;
5762 vsi->seid = 0;
5763 vsi->id = 0;
5764 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
5765 return 0;
5767
5768 case I40E_VSI_VMDQ2:
5769 ctxt.pf_num = hw->pf_id;
5770 ctxt.vf_num = 0;
5771 ctxt.uplink_seid = vsi->uplink_seid;
5772 ctxt.connection_type = 0x1; /* regular data port */
5773 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5774
5775 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5776
5777 /* This VSI is connected to VEB so the switch_id
5778 * should be set to zero by default.
5779 */
5780 ctxt.info.switch_id = 0;
5781 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5782 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5783
5784 /* Setup the VSI tx/rx queue map for TC0 only for now */
5785 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
5786 break;
5787
5788 case I40E_VSI_SRIOV:
5789 ctxt.pf_num = hw->pf_id;
5790 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
5791 ctxt.uplink_seid = vsi->uplink_seid;
5792 ctxt.connection_type = 0x1; /* regular data port */
5793 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5794
5795 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5796
5797 /* This VSI is connected to VEB so the switch_id
5798 * should be set to zero by default.
5799 */
5800 ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5801
5802 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
5803 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5804 /* Setup the VSI tx/rx queue map for TC0 only for now */
5805 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
5806 break;
5807
5808 default:
5809 return -ENODEV;
5810 }
5811
5812 if (vsi->type != I40E_VSI_MAIN) {
5813 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5814 if (ret) {
5815 dev_info(&vsi->back->pdev->dev,
5816 "add vsi failed, aq_err=%d\n",
5817 vsi->back->hw.aq.asq_last_status);
5818 ret = -ENOENT;
5819 goto err;
5820 }
5821 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5822 vsi->info.valid_sections = 0;
5823 vsi->seid = ctxt.seid;
5824 vsi->id = ctxt.vsi_number;
5825 }
5826
5827 /* If macvlan filters already exist, force them to get loaded */
5828 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
5829 f->changed = true;
5830 f_count++;
5831 }
5832 if (f_count) {
5833 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
5834 pf->flags |= I40E_FLAG_FILTER_SYNC;
5835 }
5836
5837 /* Update VSI BW information */
5838 ret = i40e_vsi_get_bw_info(vsi);
5839 if (ret) {
5840 dev_info(&pf->pdev->dev,
5841 "couldn't get vsi bw info, err %d, aq_err %d\n",
5842 ret, pf->hw.aq.asq_last_status);
5843 /* VSI is already added so not tearing that up */
5844 ret = 0;
5845 }
5846
5847err:
5848 return ret;
5849}
5850
5851/**
5852 * i40e_vsi_release - Delete a VSI and free its resources
5853 * @vsi: the VSI being removed
5854 *
5855 * Returns 0 on success or < 0 on error
5856 **/
5857int i40e_vsi_release(struct i40e_vsi *vsi)
5858{
5859 struct i40e_mac_filter *f, *ftmp;
5860 struct i40e_veb *veb = NULL;
5861 struct i40e_pf *pf;
5862 u16 uplink_seid;
5863 int i, n;
5864
5865 pf = vsi->back;
5866
5867 /* release of a VEB-owner or last VSI is not allowed */
5868 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
5869 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
5870 vsi->seid, vsi->uplink_seid);
5871 return -ENODEV;
5872 }
5873 if (vsi == pf->vsi[pf->lan_vsi] &&
5874 !test_bit(__I40E_DOWN, &pf->state)) {
5875 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
5876 return -ENODEV;
5877 }
5878
5879 uplink_seid = vsi->uplink_seid;
5880 if (vsi->type != I40E_VSI_SRIOV) {
5881 if (vsi->netdev_registered) {
5882 vsi->netdev_registered = false;
5883 if (vsi->netdev) {
5884 /* results in a call to i40e_close() */
5885 unregister_netdev(vsi->netdev);
5886 free_netdev(vsi->netdev);
5887 vsi->netdev = NULL;
5888 }
5889 } else {
5890 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
5891 i40e_down(vsi);
5892 i40e_vsi_free_irq(vsi);
5893 i40e_vsi_free_tx_resources(vsi);
5894 i40e_vsi_free_rx_resources(vsi);
5895 }
5896 i40e_vsi_disable_irq(vsi);
5897 }
5898
5899 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
5900 i40e_del_filter(vsi, f->macaddr, f->vlan,
5901 f->is_vf, f->is_netdev);
5902 i40e_sync_vsi_filters(vsi);
5903
5904 i40e_vsi_delete(vsi);
5905 i40e_vsi_free_q_vectors(vsi);
5906 i40e_vsi_clear_rings(vsi);
5907 i40e_vsi_clear(vsi);
5908
5909 /* If this was the last thing on the VEB, except for the
5910 * controlling VSI, remove the VEB, which puts the controlling
5911 * VSI onto the next level down in the switch.
5912 *
5913 * Well, okay, there's one more exception here: don't remove
5914 * the orphan VEBs yet. We'll wait for an explicit remove request
5915 * from up the network stack.
5916 */
5917 for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
5918 if (pf->vsi[i] &&
5919 pf->vsi[i]->uplink_seid == uplink_seid &&
5920 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
5921 n++; /* count the VSIs */
5922 }
5923 }
5924 for (i = 0; i < I40E_MAX_VEB; i++) {
5925 if (!pf->veb[i])
5926 continue;
5927 if (pf->veb[i]->uplink_seid == uplink_seid)
5928 n++; /* count the VEBs */
5929 if (pf->veb[i]->seid == uplink_seid)
5930 veb = pf->veb[i];
5931 }
5932 if (n == 0 && veb && veb->uplink_seid != 0)
5933 i40e_veb_release(veb);
5934
5935 return 0;
5936}
5937
5938/**
5939 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
5940 * @vsi: ptr to the VSI
5941 *
5942 * This should only be called after i40e_vsi_mem_alloc() which allocates the
5943 * corresponding SW VSI structure and initializes num_queue_pairs for the
5944 * newly allocated VSI.
5945 *
5946 * Returns 0 on success or negative on failure
5947 **/
5948static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
5949{
5950 int ret = -ENOENT;
5951 struct i40e_pf *pf = vsi->back;
5952
5953 if (vsi->q_vectors) {
5954 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
5955 vsi->seid);
5956 return -EEXIST;
5957 }
5958
5959 if (vsi->base_vector) {
5960 dev_info(&pf->pdev->dev,
5961 "VSI %d has non-zero base vector %d\n",
5962 vsi->seid, vsi->base_vector);
5963 return -EEXIST;
5964 }
5965
5966 ret = i40e_alloc_q_vectors(vsi);
5967 if (ret) {
5968 dev_info(&pf->pdev->dev,
5969 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
5970 vsi->num_q_vectors, vsi->seid, ret);
5971 vsi->num_q_vectors = 0;
5972 goto vector_setup_out;
5973 }
5974
	/* capture the result in a signed local first; if vsi->base_vector
	 * is an unsigned type (an assumption about the struct definition),
	 * a < 0 check directly on it could never fire
	 */
	ret = i40e_get_lump(pf, pf->irq_pile, vsi->num_q_vectors, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get q tracking for VSI %d, err=%d\n",
			 vsi->seid, ret);
		i40e_vsi_free_q_vectors(vsi);
		ret = -ENOENT;
		goto vector_setup_out;
	}
	vsi->base_vector = ret;
	ret = 0;
5985
5986vector_setup_out:
5987 return ret;
5988}
5989
5990/**
5991 * i40e_vsi_setup - Set up a VSI by a given type
5992 * @pf: board private structure
5993 * @type: VSI type
5994 * @uplink_seid: the switch element to link to
5995 * @param1: usage depends upon VSI type. For VF types, indicates VF id
5996 *
 * This allocates the sw VSI structure and its queue resources, then adds
 * the VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw
 * struct on success, otherwise returns NULL on failure.
6002 **/
6003struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
6004 u16 uplink_seid, u32 param1)
6005{
6006 struct i40e_vsi *vsi = NULL;
6007 struct i40e_veb *veb = NULL;
6008 int ret, i;
6009 int v_idx;
6010
6011 /* The requested uplink_seid must be either
6012 * - the PF's port seid
6013 * no VEB is needed because this is the PF
6014 * or this is a Flow Director special case VSI
6015 * - seid of an existing VEB
6016 * - seid of a VSI that owns an existing VEB
6017 * - seid of a VSI that doesn't own a VEB
6018 * a new VEB is created and the VSI becomes the owner
6019 * - seid of the PF VSI, which is what creates the first VEB
6020 * this is a special case of the previous
6021 *
6022 * Find which uplink_seid we were given and create a new VEB if needed
6023 */
6024 for (i = 0; i < I40E_MAX_VEB; i++) {
6025 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
6026 veb = pf->veb[i];
6027 break;
6028 }
6029 }
6030
6031 if (!veb && uplink_seid != pf->mac_seid) {
6032
6033 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6034 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
6035 vsi = pf->vsi[i];
6036 break;
6037 }
6038 }
6039 if (!vsi) {
6040 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
6041 uplink_seid);
6042 return NULL;
6043 }
6044
6045 if (vsi->uplink_seid == pf->mac_seid)
6046 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
6047 vsi->tc_config.enabled_tc);
6048 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
6049 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
6050 vsi->tc_config.enabled_tc);
6051
6052 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
6053 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
6054 veb = pf->veb[i];
6055 }
6056 if (!veb) {
6057 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
6058 return NULL;
6059 }
6060
6061 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
6062 uplink_seid = veb->seid;
6063 }
6064
6065 /* get vsi sw struct */
6066 v_idx = i40e_vsi_mem_alloc(pf, type);
6067 if (v_idx < 0)
6068 goto err_alloc;
6069 vsi = pf->vsi[v_idx];
6070 vsi->type = type;
6071 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
6072
6073 if (type == I40E_VSI_MAIN)
6074 pf->lan_vsi = v_idx;
6075 else if (type == I40E_VSI_SRIOV)
6076 vsi->vf_id = param1;
6077 /* assign it some queues */
6078 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
6079 if (ret < 0) {
6080 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
6081 vsi->seid, ret);
6082 goto err_vsi;
6083 }
6084 vsi->base_queue = ret;
6085
6086 /* get a VSI from the hardware */
6087 vsi->uplink_seid = uplink_seid;
6088 ret = i40e_add_vsi(vsi);
6089 if (ret)
6090 goto err_vsi;
6091
6092 switch (vsi->type) {
6093 /* setup the netdev if needed */
6094 case I40E_VSI_MAIN:
6095 case I40E_VSI_VMDQ2:
6096 ret = i40e_config_netdev(vsi);
6097 if (ret)
6098 goto err_netdev;
6099 ret = register_netdev(vsi->netdev);
6100 if (ret)
6101 goto err_netdev;
6102 vsi->netdev_registered = true;
6103 netif_carrier_off(vsi->netdev);
6104 /* fall through */
6105
6106 case I40E_VSI_FDIR:
6107 /* set up vectors and rings if needed */
6108 ret = i40e_vsi_setup_vectors(vsi);
6109 if (ret)
6110 goto err_msix;
6111
6112 ret = i40e_alloc_rings(vsi);
6113 if (ret)
6114 goto err_rings;
6115
6116 /* map all of the rings to the q_vectors */
6117 i40e_vsi_map_rings_to_vectors(vsi);
6118
6119 i40e_vsi_reset_stats(vsi);
6120 break;
6121
6122 default:
6123 /* no netdev or rings for the other VSI types */
6124 break;
6125 }
6126
6127 return vsi;
6128
6129err_rings:
6130 i40e_vsi_free_q_vectors(vsi);
6131err_msix:
6132 if (vsi->netdev_registered) {
6133 vsi->netdev_registered = false;
6134 unregister_netdev(vsi->netdev);
6135 free_netdev(vsi->netdev);
6136 vsi->netdev = NULL;
6137 }
6138err_netdev:
6139 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
6140err_vsi:
6141 i40e_vsi_clear(vsi);
6142err_alloc:
6143 return NULL;
6144}
6145
6146/**
6147 * i40e_veb_get_bw_info - Query VEB BW information
6148 * @veb: the veb to query
6149 *
6150 * Query the Tx scheduler BW configuration data for given VEB
6151 **/
6152static int i40e_veb_get_bw_info(struct i40e_veb *veb)
6153{
6154 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
6155 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
6156 struct i40e_pf *pf = veb->pf;
6157 struct i40e_hw *hw = &pf->hw;
6158 u32 tc_bw_max;
6159 int ret = 0;
6160 int i;
6161
6162 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
6163 &bw_data, NULL);
6164 if (ret) {
6165 dev_info(&pf->pdev->dev,
6166 "query veb bw config failed, aq_err=%d\n",
6167 hw->aq.asq_last_status);
6168 goto out;
6169 }
6170
6171 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
6172 &ets_data, NULL);
6173 if (ret) {
6174 dev_info(&pf->pdev->dev,
6175 "query veb bw ets config failed, aq_err=%d\n",
6176 hw->aq.asq_last_status);
6177 goto out;
6178 }
6179
6180 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
6181 veb->bw_max_quanta = ets_data.tc_bw_max;
6182 veb->is_abs_credits = bw_data.absolute_credits_enable;
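	/* tc_bw_max packs a small per-TC "max quanta" field, eight TCs
	 * across the two 16-bit words; the 0x7 mask below suggests only
	 * the low 3 bits of each 4-bit field are meaningful
	 */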
6183 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
6184 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
6185 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6186 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
6187 veb->bw_tc_limit_credits[i] =
6188 le16_to_cpu(bw_data.tc_bw_limits[i]);
6189 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
6190 }
6191
6192out:
6193 return ret;
6194}
6195
6196/**
6197 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
6198 * @pf: board private structure
6199 *
6200 * On error: returns error code (negative)
 * On success: returns VEB index in PF (positive)
6202 **/
6203static int i40e_veb_mem_alloc(struct i40e_pf *pf)
6204{
6205 int ret = -ENOENT;
6206 struct i40e_veb *veb;
6207 int i;
6208
6209 /* Need to protect the allocation of switch elements at the PF level */
6210 mutex_lock(&pf->switch_mutex);
6211
6212 /* VEB list may be fragmented if VEB creation/destruction has
6213 * been happening. We can afford to do a quick scan to look
6214 * for any free slots in the list.
6215 *
	 * find the next empty veb slot
6217 */
6218 i = 0;
6219 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
6220 i++;
6221 if (i >= I40E_MAX_VEB) {
6222 ret = -ENOMEM;
6223 goto err_alloc_veb; /* out of VEB slots! */
6224 }
6225
6226 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
6227 if (!veb) {
6228 ret = -ENOMEM;
6229 goto err_alloc_veb;
6230 }
6231 veb->pf = pf;
6232 veb->idx = i;
6233 veb->enabled_tc = 1;
6234
6235 pf->veb[i] = veb;
6236 ret = i;
6237err_alloc_veb:
6238 mutex_unlock(&pf->switch_mutex);
6239 return ret;
6240}
6241
6242/**
6243 * i40e_switch_branch_release - Delete a branch of the switch tree
6244 * @branch: where to start deleting
6245 *
6246 * This uses recursion to find the tips of the branch to be
6247 * removed, deleting until we get back to and can delete this VEB.
6248 **/
6249static void i40e_switch_branch_release(struct i40e_veb *branch)
6250{
6251 struct i40e_pf *pf = branch->pf;
6252 u16 branch_seid = branch->seid;
6253 u16 veb_idx = branch->idx;
6254 int i;
6255
6256 /* release any VEBs on this VEB - RECURSION */
6257 for (i = 0; i < I40E_MAX_VEB; i++) {
6258 if (!pf->veb[i])
6259 continue;
6260 if (pf->veb[i]->uplink_seid == branch->seid)
6261 i40e_switch_branch_release(pf->veb[i]);
6262 }
6263
6264 /* Release the VSIs on this VEB, but not the owner VSI.
6265 *
6266 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
6267 * the VEB itself, so don't use (*branch) after this loop.
6268 */
6269 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6270 if (!pf->vsi[i])
6271 continue;
6272 if (pf->vsi[i]->uplink_seid == branch_seid &&
6273 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
6274 i40e_vsi_release(pf->vsi[i]);
6275 }
6276 }
6277
6278 /* There's one corner case where the VEB might not have been
6279 * removed, so double check it here and remove it if needed.
6280 * This case happens if the veb was created from the debugfs
6281 * commands and no VSIs were added to it.
6282 */
6283 if (pf->veb[veb_idx])
6284 i40e_veb_release(pf->veb[veb_idx]);
6285}
6286
6287/**
6288 * i40e_veb_clear - remove veb struct
6289 * @veb: the veb to remove
6290 **/
6291static void i40e_veb_clear(struct i40e_veb *veb)
6292{
6293 if (!veb)
6294 return;
6295
6296 if (veb->pf) {
6297 struct i40e_pf *pf = veb->pf;
6298
6299 mutex_lock(&pf->switch_mutex);
6300 if (pf->veb[veb->idx] == veb)
6301 pf->veb[veb->idx] = NULL;
6302 mutex_unlock(&pf->switch_mutex);
6303 }
6304
6305 kfree(veb);
6306}
6307
6308/**
6309 * i40e_veb_release - Delete a VEB and free its resources
6310 * @veb: the VEB being removed
6311 **/
6312void i40e_veb_release(struct i40e_veb *veb)
6313{
6314 struct i40e_vsi *vsi = NULL;
6315 struct i40e_pf *pf;
6316 int i, n = 0;
6317
6318 pf = veb->pf;
6319
6320 /* find the remaining VSI and check for extras */
6321 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6322 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
6323 n++;
6324 vsi = pf->vsi[i];
6325 }
6326 }
6327 if (n != 1) {
6328 dev_info(&pf->pdev->dev,
6329 "can't remove VEB %d with %d VSIs left\n",
6330 veb->seid, n);
6331 return;
6332 }
6333
6334 /* move the remaining VSI to uplink veb */
6335 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
6336 if (veb->uplink_seid) {
6337 vsi->uplink_seid = veb->uplink_seid;
6338 if (veb->uplink_seid == pf->mac_seid)
6339 vsi->veb_idx = I40E_NO_VEB;
6340 else
6341 vsi->veb_idx = veb->veb_idx;
6342 } else {
6343 /* floating VEB */
6344 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6345 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
6346 }
6347
6348 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
6349 i40e_veb_clear(veb);
6352}
6353
6354/**
6355 * i40e_add_veb - create the VEB in the switch
6356 * @veb: the VEB to be instantiated
6357 * @vsi: the controlling VSI
6358 **/
6359static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
6360{
6361 bool is_default = (vsi->idx == vsi->back->lan_vsi);
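	/* the VEB that uplinks the PF's LAN VSI is flagged as is_default
	 * in the add_veb command, presumably making it the switch's
	 * default forwarding element
	 */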
6362 int ret;
6363
6364 /* get a VEB from the hardware */
6365 ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
6366 veb->enabled_tc, is_default, &veb->seid, NULL);
6367 if (ret) {
6368 dev_info(&veb->pf->pdev->dev,
6369 "couldn't add VEB, err %d, aq_err %d\n",
6370 ret, veb->pf->hw.aq.asq_last_status);
6371 return -EPERM;
6372 }
6373
6374 /* get statistics counter */
6375 ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
6376 &veb->stats_idx, NULL, NULL, NULL);
6377 if (ret) {
6378 dev_info(&veb->pf->pdev->dev,
6379 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
6380 ret, veb->pf->hw.aq.asq_last_status);
6381 return -EPERM;
6382 }
6383 ret = i40e_veb_get_bw_info(veb);
6384 if (ret) {
6385 dev_info(&veb->pf->pdev->dev,
6386 "couldn't get VEB bw info, err %d, aq_err %d\n",
6387 ret, veb->pf->hw.aq.asq_last_status);
6388 i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
6389 return -ENOENT;
6390 }
6391
6392 vsi->uplink_seid = veb->seid;
6393 vsi->veb_idx = veb->idx;
6394 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
6395
6396 return 0;
6397}
6398
6399/**
6400 * i40e_veb_setup - Set up a VEB
6401 * @pf: board private structure
6402 * @flags: VEB setup flags
6403 * @uplink_seid: the switch element to link to
6404 * @vsi_seid: the initial VSI seid
6405 * @enabled_tc: Enabled TC bit-map
6406 *
6407 * This allocates the sw VEB structure and links it into the switch
6408 * It is possible and legal for this to be a duplicate of an already
6409 * existing VEB. It is also possible for both uplink and vsi seids
6410 * to be zero, in order to create a floating VEB.
6411 *
6412 * Returns pointer to the successfully allocated VEB sw struct on
6413 * success, otherwise returns NULL on failure.
6414 **/
6415struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
6416 u16 uplink_seid, u16 vsi_seid,
6417 u8 enabled_tc)
6418{
6419 struct i40e_veb *veb, *uplink_veb = NULL;
6420 int vsi_idx, veb_idx;
6421 int ret;
6422
6423 /* if one seid is 0, the other must be 0 to create a floating relay */
6424 if ((uplink_seid == 0 || vsi_seid == 0) &&
6425 (uplink_seid + vsi_seid != 0)) {
6426 dev_info(&pf->pdev->dev,
6427 "one, not both seid's are 0: uplink=%d vsi=%d\n",
6428 uplink_seid, vsi_seid);
6429 return NULL;
6430 }
6431
6432 /* make sure there is such a vsi and uplink */
6433 for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
6434 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
6435 break;
6436 if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
6437 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
6438 vsi_seid);
6439 return NULL;
6440 }
6441
6442 if (uplink_seid && uplink_seid != pf->mac_seid) {
6443 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6444 if (pf->veb[veb_idx] &&
6445 pf->veb[veb_idx]->seid == uplink_seid) {
6446 uplink_veb = pf->veb[veb_idx];
6447 break;
6448 }
6449 }
6450 if (!uplink_veb) {
6451 dev_info(&pf->pdev->dev,
6452 "uplink seid %d not found\n", uplink_seid);
6453 return NULL;
6454 }
6455 }
6456
6457 /* get veb sw struct */
6458 veb_idx = i40e_veb_mem_alloc(pf);
6459 if (veb_idx < 0)
6460 goto err_alloc;
6461 veb = pf->veb[veb_idx];
6462 veb->flags = flags;
6463 veb->uplink_seid = uplink_seid;
6464 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
6465 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
6466
6467 /* create the VEB in the switch */
6468 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
6469 if (ret)
6470 goto err_veb;
6471
6472 return veb;
6473
6474err_veb:
6475 i40e_veb_clear(veb);
6476err_alloc:
6477 return NULL;
6478}
6479
6480/**
6481 * i40e_setup_pf_switch_element - set pf vars based on switch type
6482 * @pf: board private structure
6483 * @ele: element we are building info from
6484 * @num_reported: total number of elements
6485 * @printconfig: should we print the contents
6486 *
6487 * helper function to assist in extracting a few useful SEID values.
6488 **/
6489static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
6490 struct i40e_aqc_switch_config_element_resp *ele,
6491 u16 num_reported, bool printconfig)
6492{
6493 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
6494 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
6495 u8 element_type = ele->element_type;
6496 u16 seid = le16_to_cpu(ele->seid);
6497
6498 if (printconfig)
6499 dev_info(&pf->pdev->dev,
6500 "type=%d seid=%d uplink=%d downlink=%d\n",
6501 element_type, seid, uplink_seid, downlink_seid);
6502
6503 switch (element_type) {
6504 case I40E_SWITCH_ELEMENT_TYPE_MAC:
6505 pf->mac_seid = seid;
6506 break;
6507 case I40E_SWITCH_ELEMENT_TYPE_VEB:
6508 /* Main VEB? */
6509 if (uplink_seid != pf->mac_seid)
6510 break;
6511 if (pf->lan_veb == I40E_NO_VEB) {
6512 int v;
6513
6514 /* find existing or else empty VEB */
6515 for (v = 0; v < I40E_MAX_VEB; v++) {
6516 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
6517 pf->lan_veb = v;
6518 break;
6519 }
6520 }
6521 if (pf->lan_veb == I40E_NO_VEB) {
6522 v = i40e_veb_mem_alloc(pf);
6523 if (v < 0)
6524 break;
6525 pf->lan_veb = v;
6526 }
6527 }
6528
6529 pf->veb[pf->lan_veb]->seid = seid;
6530 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
6531 pf->veb[pf->lan_veb]->pf = pf;
6532 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
6533 break;
6534 case I40E_SWITCH_ELEMENT_TYPE_VSI:
6535 if (num_reported != 1)
6536 break;
6537 /* This is immediately after a reset so we can assume this is
6538 * the PF's VSI
6539 */
6540 pf->mac_seid = uplink_seid;
6541 pf->pf_seid = downlink_seid;
6542 pf->main_vsi_seid = seid;
6543 if (printconfig)
6544 dev_info(&pf->pdev->dev,
6545 "pf_seid=%d main_vsi_seid=%d\n",
6546 pf->pf_seid, pf->main_vsi_seid);
6547 break;
6548 case I40E_SWITCH_ELEMENT_TYPE_PF:
6549 case I40E_SWITCH_ELEMENT_TYPE_VF:
6550 case I40E_SWITCH_ELEMENT_TYPE_EMP:
6551 case I40E_SWITCH_ELEMENT_TYPE_BMC:
6552 case I40E_SWITCH_ELEMENT_TYPE_PE:
6553 case I40E_SWITCH_ELEMENT_TYPE_PA:
6554 /* ignore these for now */
6555 break;
6556 default:
6557 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
6558 element_type, seid);
6559 break;
6560 }
6561}
6562
6563/**
6564 * i40e_fetch_switch_configuration - Get switch config from firmware
6565 * @pf: board private structure
6566 * @printconfig: should we print the contents
6567 *
6568 * Get the current switch configuration from the device and
6569 * extract a few useful SEID values.
6570 **/
6571int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
6572{
6573 struct i40e_aqc_get_switch_config_resp *sw_config;
6574 u16 next_seid = 0;
6575 int ret = 0;
6576 u8 *aq_buf;
6577 int i;
6578
6579 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
6580 if (!aq_buf)
6581 return -ENOMEM;
6582
6583 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
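	/* the get_switch_config AQ command returns the configuration in
	 * chunks; next_seid acts as a cursor between calls and comes back
	 * as 0 once the last chunk has been delivered
	 */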
6584 do {
6585 u16 num_reported, num_total;
6586
6587 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
6588 I40E_AQ_LARGE_BUF,
6589 &next_seid, NULL);
6590 if (ret) {
6591 dev_info(&pf->pdev->dev,
6592 "get switch config failed %d aq_err=%x\n",
6593 ret, pf->hw.aq.asq_last_status);
6594 kfree(aq_buf);
6595 return -ENOENT;
6596 }
6597
6598 num_reported = le16_to_cpu(sw_config->header.num_reported);
6599 num_total = le16_to_cpu(sw_config->header.num_total);
6600
6601 if (printconfig)
6602 dev_info(&pf->pdev->dev,
6603 "header: %d reported %d total\n",
6604 num_reported, num_total);
6605
6606 if (num_reported) {
6607 int sz = sizeof(*sw_config) * num_reported;
6608
6609 kfree(pf->sw_config);
6610 pf->sw_config = kzalloc(sz, GFP_KERNEL);
6611 if (pf->sw_config)
6612 memcpy(pf->sw_config, sw_config, sz);
6613 }
6614
6615 for (i = 0; i < num_reported; i++) {
6616 struct i40e_aqc_switch_config_element_resp *ele =
6617 &sw_config->element[i];
6618
6619 i40e_setup_pf_switch_element(pf, ele, num_reported,
6620 printconfig);
6621 }
6622 } while (next_seid != 0);
6623
6624 kfree(aq_buf);
6625 return ret;
6626}
6627
6628/**
6629 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
6630 * @pf: board private structure
6631 *
6632 * Returns 0 on success, negative value on failure
6633 **/
6634static int i40e_setup_pf_switch(struct i40e_pf *pf)
6635{
6636 int ret;
6637
6638 /* find out what's out there already */
6639 ret = i40e_fetch_switch_configuration(pf, false);
6640 if (ret) {
6641 dev_info(&pf->pdev->dev,
6642 "couldn't fetch switch config, err %d, aq_err %d\n",
6643 ret, pf->hw.aq.asq_last_status);
6644 return ret;
6645 }
6646 i40e_pf_reset_stats(pf);
6647
6648 /* fdir VSI must happen first to be sure it gets queue 0, but only
6649 * if there is enough room for the fdir VSI
6650 */
6651 if (pf->num_lan_qps > 1)
6652 i40e_fdir_setup(pf);
6653
6654 /* first time setup */
6655 if (pf->lan_vsi == I40E_NO_VSI) {
6656 struct i40e_vsi *vsi = NULL;
6657 u16 uplink_seid;
6658
6659 /* Set up the PF VSI associated with the PF's main VSI
6660 * that is already in the HW switch
6661 */
6662 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
6663 uplink_seid = pf->veb[pf->lan_veb]->seid;
6664 else
6665 uplink_seid = pf->mac_seid;
6666
6667 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
6668 if (!vsi) {
6669 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
6670 i40e_fdir_teardown(pf);
6671 return -EAGAIN;
6672 }
6673 /* accommodate kcompat by copying the main VSI queue count
6674 * into the pf, since this newer code pushes the pf queue
6675 * info down a level into a VSI
6676 */
6677 pf->num_rx_queues = vsi->alloc_queue_pairs;
6678 pf->num_tx_queues = vsi->alloc_queue_pairs;
6679 } else {
6680 /* force a reset of TC and queue layout configurations */
6681 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
6682 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
6683 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
6684 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
6685 }
6686 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
6687
6688 /* Setup static PF queue filter control settings */
6689 ret = i40e_setup_pf_filter_control(pf);
6690 if (ret) {
6691 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
6692 ret);
		/* Failure here should not stop the remaining setup steps */
6694 }
6695
6696 /* enable RSS in the HW, even for only one queue, as the stack can use
6697 * the hash
6698 */
6699 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
6700 i40e_config_rss(pf);
6701
6702 /* fill in link information and enable LSE reporting */
6703 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
6704 i40e_link_event(pf);
6705
6706 /* Initialize user-specifics link properties */
6707 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
6708 I40E_AQ_AN_COMPLETED) ? true : false);
6709 pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
6710 if (pf->hw.phy.link_info.an_info &
6711 (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
6712 pf->hw.fc.current_mode = I40E_FC_FULL;
6713 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
6714 pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
6715 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
6716 pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
6717 else
6718 pf->hw.fc.current_mode = I40E_FC_DEFAULT;
6719
6720 return ret;
6721}
6722
6723/**
6724 * i40e_set_rss_size - helper to set rss_size
6725 * @pf: board private structure
 * @queues_left: how many queues are left unclaimed
 **/
6728static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
6729{
6730 int num_tc0;
6731
6732 num_tc0 = min_t(int, queues_left, pf->rss_size_max);
6733 num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
6734 num_tc0 = rounddown_pow_of_two(num_tc0);
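	/* e.g. queues_left = 30 on a 12-CPU node (assuming rss_size_max is
	 * at least 12) gives min(30, 12) = 12, rounded down to 8
	 */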
6735
6736 return num_tc0;
6737}
6738
6739/**
6740 * i40e_determine_queue_usage - Work out queue distribution
6741 * @pf: board private structure
6742 **/
6743static void i40e_determine_queue_usage(struct i40e_pf *pf)
6744{
6745 int accum_tc_size;
6746 int queues_left;
6747
6748 pf->num_lan_qps = 0;
6749 pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
6750 accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;
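	/* e.g. assuming a default of 8 queue pairs per TC, the seven
	 * non-default TCs reserve accum_tc_size = 7 * 8 = 56 queue pairs
	 * in the DCB branches below
	 */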
6751
6752 /* Find the max queues to be put into basic use. We'll always be
6753 * using TC0, whether or not DCB is running, and TC0 will get the
6754 * big RSS set.
6755 */
6756 queues_left = pf->hw.func_caps.num_tx_qp;
6757
6758 if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
6759 (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
6760 !(pf->flags & (I40E_FLAG_RSS_ENABLED |
6761 I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
6762 (queues_left == 1)) {
6763
6764 /* one qp for PF, no queues for anything else */
6765 queues_left = 0;
6766 pf->rss_size = pf->num_lan_qps = 1;
6767
6768 /* make sure all the fancies are disabled */
6769 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
6770 I40E_FLAG_MQ_ENABLED |
6771 I40E_FLAG_FDIR_ENABLED |
6772 I40E_FLAG_FDIR_ATR_ENABLED |
6773 I40E_FLAG_DCB_ENABLED |
6774 I40E_FLAG_SRIOV_ENABLED |
6775 I40E_FLAG_VMDQ_ENABLED);
6776
6777 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
6778 !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
6779 !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
6780
6781 pf->rss_size = i40e_set_rss_size(pf, queues_left);
6782
6783 queues_left -= pf->rss_size;
6784 pf->num_lan_qps = pf->rss_size;
6785
6786 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
6787 !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
6788 (pf->flags & I40E_FLAG_DCB_ENABLED)) {
6789
6790 /* save num_tc_qps queues for TCs 1 thru 7 and the rest
6791 * are set up for RSS in TC0
6792 */
6793 queues_left -= accum_tc_size;
6794
6795 pf->rss_size = i40e_set_rss_size(pf, queues_left);
6796
6797 queues_left -= pf->rss_size;
6798 if (queues_left < 0) {
6799 dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
6800 return;
6801 }
6802
6803 pf->num_lan_qps = pf->rss_size + accum_tc_size;
6804
6805 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
6806 (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
6807 !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
6808
6809 queues_left -= 1; /* save 1 queue for FD */
6810
6811 pf->rss_size = i40e_set_rss_size(pf, queues_left);
6812
6813 queues_left -= pf->rss_size;
6814 if (queues_left < 0) {
6815 dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
6816 return;
6817 }
6818
6819 pf->num_lan_qps = pf->rss_size;
6820
6821 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
6822 (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
6823 (pf->flags & I40E_FLAG_DCB_ENABLED)) {
6824
6825 /* save 1 queue for TCs 1 thru 7,
6826 * 1 queue for flow director,
6827 * and the rest are set up for RSS in TC0
6828 */
6829 queues_left -= 1;
6830 queues_left -= accum_tc_size;
6831
6832 pf->rss_size = i40e_set_rss_size(pf, queues_left);
6833 queues_left -= pf->rss_size;
6834 if (queues_left < 0) {
6835 dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
6836 return;
6837 }
6838
6839 pf->num_lan_qps = pf->rss_size + accum_tc_size;
6840
6841 } else {
6842 dev_info(&pf->pdev->dev,
6843 "Invalid configuration, flags=0x%08llx\n", pf->flags);
6844 return;
6845 }
6846
6847 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
6848 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
6849 pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
6850 pf->num_vf_qps));
6851 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
6852 }
6853
6854 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
6855 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
6856 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
6857 (queues_left / pf->num_vmdq_qps));
6858 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
6859 }
6862}
6863
6864/**
6865 * i40e_setup_pf_filter_control - Setup PF static filter control
6866 * @pf: PF to be setup
6867 *
 * i40e_setup_pf_filter_control sets up a pf's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per-PF
 * filter sizes required for them. It also enables Flow Director,
 * ethertype and macvlan type filter settings for the pf.
6872 *
6873 * Returns 0 on success, negative on failure
6874 **/
6875static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
6876{
6877 struct i40e_filter_control_settings *settings = &pf->filter_settings;
6878
6879 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
6880
6881 /* Flow Director is enabled */
6882 if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
6883 settings->enable_fdir = true;
6884
6885 /* Ethtype and MACVLAN filters enabled for PF */
6886 settings->enable_ethtype = true;
6887 settings->enable_macvlan = true;
6888
6889 if (i40e_set_filter_control(&pf->hw, settings))
6890 return -ENOENT;
6891
6892 return 0;
6893}
6894
6895/**
6896 * i40e_probe - Device initialization routine
6897 * @pdev: PCI device information struct
6898 * @ent: entry in i40e_pci_tbl
6899 *
6900 * i40e_probe initializes a pf identified by a pci_dev structure.
6901 * The OS initialization, configuring of the pf private structure,
6902 * and a hardware reset occur.
6903 *
6904 * Returns 0 on success, negative on failure
6905 **/
6906static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6907{
6908 struct i40e_driver_version dv;
6909 struct i40e_pf *pf;
6910 struct i40e_hw *hw;
6911 int err = 0;
6912 u32 len;
6913
6914 err = pci_enable_device_mem(pdev);
6915 if (err)
6916 return err;
6917
	/* set up for high or low dma */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/* coherent mask for the same size will always succeed if
		 * dma_set_mask does
		 */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: %d\n", err);
			err = -EIO;
			goto err_dma;
		}
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	}
6931
6932 /* set up pci connections */
6933 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
6934 IORESOURCE_MEM), i40e_driver_name);
6935 if (err) {
6936 dev_info(&pdev->dev,
6937 "pci_request_selected_regions failed %d\n", err);
6938 goto err_pci_reg;
6939 }
6940
6941 pci_enable_pcie_error_reporting(pdev);
6942 pci_set_master(pdev);
6943
6944 /* Now that we have a PCI connection, we need to do the
6945 * low level device setup. This is primarily setting up
6946 * the Admin Queue structures and then querying for the
6947 * device's current profile information.
6948 */
6949 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
6950 if (!pf) {
6951 err = -ENOMEM;
6952 goto err_pf_alloc;
6953 }
6954 pf->next_vsi = 0;
6955 pf->pdev = pdev;
6956 set_bit(__I40E_DOWN, &pf->state);
6957
6958 hw = &pf->hw;
6959 hw->back = pf;
6960 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
6961 pci_resource_len(pdev, 0));
6962 if (!hw->hw_addr) {
6963 err = -EIO;
6964 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
6965 (unsigned int)pci_resource_start(pdev, 0),
6966 (unsigned int)pci_resource_len(pdev, 0), err);
6967 goto err_ioremap;
6968 }
6969 hw->vendor_id = pdev->vendor;
6970 hw->device_id = pdev->device;
6971 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
6972 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6973 hw->subsystem_device_id = pdev->subsystem_device;
6974 hw->bus.device = PCI_SLOT(pdev->devfn);
6975 hw->bus.func = PCI_FUNC(pdev->devfn);
6976
6977 /* Reset here to make sure all is clean and to define PF 'n' */
6978 err = i40e_pf_reset(hw);
6979 if (err) {
6980 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
6981 goto err_pf_reset;
6982 }
6983 pf->pfr_count++;
6984
6985 hw->aq.num_arq_entries = I40E_AQ_LEN;
6986 hw->aq.num_asq_entries = I40E_AQ_LEN;
6987 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
6988 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
6989 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
	snprintf(pf->misc_int_name, sizeof(pf->misc_int_name),
6991 "%s-pf%d:misc",
6992 dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);
6993
6994 err = i40e_init_shared_code(hw);
6995 if (err) {
6996 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
6997 goto err_pf_reset;
6998 }
6999
	err = i40e_init_adminq(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "init_adminq failed: %d expecting API %02x.%02x\n",
			 err,
			 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
		goto err_pf_reset;
	}
	dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
7009
7010 err = i40e_get_capabilities(pf);
7011 if (err)
7012 goto err_adminq_setup;
7013
7014 err = i40e_sw_init(pf);
7015 if (err) {
7016 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
7017 goto err_sw_init;
7018 }
7019
7020 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
7021 hw->func_caps.num_rx_qp,
7022 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
7023 if (err) {
7024 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
7025 goto err_init_lan_hmc;
7026 }
7027
7028 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
7029 if (err) {
7030 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
7031 err = -ENOENT;
7032 goto err_configure_lan_hmc;
7033 }
7034
7035 i40e_get_mac_addr(hw, hw->mac.addr);
7036 if (i40e_validate_mac_addr(hw->mac.addr)) {
7037 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
7038 err = -EIO;
7039 goto err_mac_addr;
7040 }
7041 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
7042 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
7043
7044 pci_set_drvdata(pdev, pf);
7045 pci_save_state(pdev);
7046
7047 /* set up periodic task facility */
7048 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
7049 pf->service_timer_period = HZ;
7050
7051 INIT_WORK(&pf->service_task, i40e_service_task);
7052 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
7053 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
7054 pf->link_check_timeout = jiffies;
7055
7056 /* set up the main switch operations */
7057 i40e_determine_queue_usage(pf);
7058 i40e_init_interrupt_scheme(pf);
7059
7060 /* Set up the *vsi struct based on the number of VSIs in the HW,
7061 * and set up our local tracking of the MAIN PF vsi.
7062 */
7063 len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
7064 pf->vsi = kzalloc(len, GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}
7067
7068 err = i40e_setup_pf_switch(pf);
7069 if (err) {
7070 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
7071 goto err_vsis;
7072 }
7073
7074 /* The main driver is (mostly) up and happy. We need to set this state
7075 * before setting up the misc vector or we get a race and the vector
7076 * ends up disabled forever.
7077 */
7078 clear_bit(__I40E_DOWN, &pf->state);
7079
7080 /* In case of MSIX we are going to setup the misc vector right here
7081 * to handle admin queue events etc. In case of legacy and MSI
7082 * the misc functionality and queue processing is combined in
7083 * the same vector and that gets setup at open.
7084 */
7085 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7086 err = i40e_setup_misc_vector(pf);
7087 if (err) {
7088 dev_info(&pdev->dev,
7089 "setup of misc vector failed: %d\n", err);
7090 goto err_vsis;
7091 }
7092 }
7093
7094 /* prep for VF support */
7095 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
7096 (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
7097 u32 val;
7098
7099 /* disable link interrupts for VFs */
7100 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
7101 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
7102 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
7103 i40e_flush(hw);
7104 }
7105
7106 i40e_dbg_pf_init(pf);
7107
7108 /* tell the firmware that we're starting */
7109 dv.major_version = DRV_VERSION_MAJOR;
7110 dv.minor_version = DRV_VERSION_MINOR;
7111 dv.build_version = DRV_VERSION_BUILD;
7112 dv.subbuild_version = 0;
7113 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
7114
7115 /* since everything's happy, start the service_task timer */
7116 mod_timer(&pf->service_timer,
7117 round_jiffies(jiffies + pf->service_timer_period));
7118
7119 return 0;
7120
7121 /* Unwind what we've done if something failed in the setup */
7122err_vsis:
7123 set_bit(__I40E_DOWN, &pf->state);
7124err_switch_setup:
7125 i40e_clear_interrupt_scheme(pf);
7126 kfree(pf->vsi);
7127 del_timer_sync(&pf->service_timer);
7128err_mac_addr:
7129err_configure_lan_hmc:
7130 (void)i40e_shutdown_lan_hmc(hw);
7131err_init_lan_hmc:
7132 kfree(pf->qp_pile);
7133 kfree(pf->irq_pile);
7134err_sw_init:
7135err_adminq_setup:
7136 (void)i40e_shutdown_adminq(hw);
7137err_pf_reset:
7138 iounmap(hw->hw_addr);
7139err_ioremap:
7140 kfree(pf);
7141err_pf_alloc:
7142 pci_disable_pcie_error_reporting(pdev);
7143 pci_release_selected_regions(pdev,
7144 pci_select_bars(pdev, IORESOURCE_MEM));
7145err_pci_reg:
7146err_dma:
7147 pci_disable_device(pdev);
7148 return err;
7149}
7150
7151/**
7152 * i40e_remove - Device removal routine
7153 * @pdev: PCI device information struct
7154 *
7155 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
7157 * Hot-Plug event, or because the driver is going to be removed from
7158 * memory.
7159 **/
7160static void i40e_remove(struct pci_dev *pdev)
7161{
7162 struct i40e_pf *pf = pci_get_drvdata(pdev);
7163 i40e_status ret_code;
7164 u32 reg;
7165 int i;
7166
7167 i40e_dbg_pf_exit(pf);
7168
7169 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
7170 i40e_free_vfs(pf);
7171 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
7172 }
7173
7174 /* no more scheduling of any task */
7175 set_bit(__I40E_DOWN, &pf->state);
7176 del_timer_sync(&pf->service_timer);
7177 cancel_work_sync(&pf->service_task);
7178
7179 i40e_fdir_teardown(pf);
7180
7181 /* If there is a switch structure or any orphans, remove them.
7182 * This will leave only the PF's VSI remaining.
7183 */
7184 for (i = 0; i < I40E_MAX_VEB; i++) {
7185 if (!pf->veb[i])
7186 continue;
7187
7188 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
7189 pf->veb[i]->uplink_seid == 0)
7190 i40e_switch_branch_release(pf->veb[i]);
7191 }
7192
7193 /* Now we can shutdown the PF's VSI, just before we kill
7194 * adminq and hmc.
7195 */
7196 if (pf->vsi[pf->lan_vsi])
7197 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
7198
7199 i40e_stop_misc_vector(pf);
7200 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7201 synchronize_irq(pf->msix_entries[0].vector);
7202 free_irq(pf->msix_entries[0].vector, pf);
7203 }
7204
7205 /* shutdown and destroy the HMC */
7206 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
7207 if (ret_code)
7208 dev_warn(&pdev->dev,
7209 "Failed to destroy the HMC resources: %d\n", ret_code);
7210
7211 /* shutdown the adminq */
7212 i40e_aq_queue_shutdown(&pf->hw, true);
7213 ret_code = i40e_shutdown_adminq(&pf->hw);
7214 if (ret_code)
7215 dev_warn(&pdev->dev,
7216 "Failed to destroy the Admin Queue resources: %d\n",
7217 ret_code);
7218
7219 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
7220 i40e_clear_interrupt_scheme(pf);
7221 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
7222 if (pf->vsi[i]) {
7223 i40e_vsi_clear_rings(pf->vsi[i]);
7224 i40e_vsi_clear(pf->vsi[i]);
7225 pf->vsi[i] = NULL;
7226 }
7227 }
7228
7229 for (i = 0; i < I40E_MAX_VEB; i++) {
7230 kfree(pf->veb[i]);
7231 pf->veb[i] = NULL;
7232 }
7233
7234 kfree(pf->qp_pile);
7235 kfree(pf->irq_pile);
7236 kfree(pf->sw_config);
7237 kfree(pf->vsi);
7238
7239 /* force a PF reset to clean anything leftover */
7240 reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
7241 wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
7242 i40e_flush(&pf->hw);
7243
7244 iounmap(pf->hw.hw_addr);
7245 kfree(pf);
7246 pci_release_selected_regions(pdev,
7247 pci_select_bars(pdev, IORESOURCE_MEM));
7248
7249 pci_disable_pcie_error_reporting(pdev);
7250 pci_disable_device(pdev);
7251}
7252
7253/**
7254 * i40e_pci_error_detected - warning that something funky happened in PCI land
7255 * @pdev: PCI device information struct
7256 *
7257 * Called to warn that something happened and the error handling steps
 * are in progress. Allows the driver to quiesce things and be ready
 * for remediation.
7260 **/
7261static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
7262 enum pci_channel_state error)
7263{
7264 struct i40e_pf *pf = pci_get_drvdata(pdev);
7265
7266 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
7267
7268 /* shutdown all operations */
7269 i40e_pf_quiesce_all_vsi(pf);
7270
7271 /* Request a slot reset */
7272 return PCI_ERS_RESULT_NEED_RESET;
7273}
7274
7275/**
7276 * i40e_pci_error_slot_reset - a PCI slot reset just happened
7277 * @pdev: PCI device information struct
7278 *
7279 * Called to find if the driver can work with the device now that
7280 * the pci slot has been reset. If a basic connection seems good
7281 * (registers are readable and have sane content) then return a
7282 * happy little PCI_ERS_RESULT_xxx.
7283 **/
7284static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
7285{
7286 struct i40e_pf *pf = pci_get_drvdata(pdev);
7287 pci_ers_result_t result;
7288 int err;
7289 u32 reg;
7290
7291 dev_info(&pdev->dev, "%s\n", __func__);
7292 if (pci_enable_device_mem(pdev)) {
7293 dev_info(&pdev->dev,
7294 "Cannot re-enable PCI device after reset.\n");
7295 result = PCI_ERS_RESULT_DISCONNECT;
7296 } else {
7297 pci_set_master(pdev);
7298 pci_restore_state(pdev);
7299 pci_save_state(pdev);
7300 pci_wake_from_d3(pdev, false);
7301
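		/* a quick register read sanity-checks the device: RTRIG
		 * should read 0 with no reset pending, while a dead device
		 * typically reads back all ones
		 */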
7302 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7303 if (reg == 0)
7304 result = PCI_ERS_RESULT_RECOVERED;
7305 else
7306 result = PCI_ERS_RESULT_DISCONNECT;
7307 }
7308
7309 err = pci_cleanup_aer_uncorrect_error_status(pdev);
7310 if (err) {
7311 dev_info(&pdev->dev,
7312 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
7313 err);
7314 /* non-fatal, continue */
7315 }
7316
7317 return result;
7318}
7319
7320/**
7321 * i40e_pci_error_resume - restart operations after PCI error recovery
7322 * @pdev: PCI device information struct
7323 *
7324 * Called to allow the driver to bring things back up after PCI error
7325 * and/or reset recovery has finished.
7326 **/
7327static void i40e_pci_error_resume(struct pci_dev *pdev)
7328{
7329 struct i40e_pf *pf = pci_get_drvdata(pdev);
7330
7331 dev_info(&pdev->dev, "%s\n", __func__);
7332 i40e_handle_reset_warning(pf);
7333}
7334
7335static const struct pci_error_handlers i40e_err_handler = {
7336 .error_detected = i40e_pci_error_detected,
7337 .slot_reset = i40e_pci_error_slot_reset,
7338 .resume = i40e_pci_error_resume,
7339};
7340
7341static struct pci_driver i40e_driver = {
7342 .name = i40e_driver_name,
7343 .id_table = i40e_pci_tbl,
7344 .probe = i40e_probe,
7345 .remove = i40e_remove,
7346 .err_handler = &i40e_err_handler,
7347 .sriov_configure = i40e_pci_sriov_configure,
7348};
7349
7350/**
7351 * i40e_init_module - Driver registration routine
7352 *
7353 * i40e_init_module is the first routine called when the driver is
7354 * loaded. All it does is register with the PCI subsystem.
7355 **/
7356static int __init i40e_init_module(void)
7357{
7358 pr_info("%s: %s - version %s\n", i40e_driver_name,
7359 i40e_driver_string, i40e_driver_version_str);
7360 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
7361 i40e_dbg_init();
7362 return pci_register_driver(&i40e_driver);
7363}
7364module_init(i40e_init_module);
7365
7366/**
7367 * i40e_exit_module - Driver exit cleanup routine
7368 *
7369 * i40e_exit_module is called just before the driver is removed
7370 * from memory.
7371 **/
7372static void __exit i40e_exit_module(void)
7373{
7374 pci_unregister_driver(&i40e_driver);
7375 i40e_dbg_exit();
7376}
7377module_exit(i40e_exit_module);