/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif

#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_PCI_IOV
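/**
 * ixgbe_find_enabled_vfs - count VF devices already enabled on the bus
 * @adapter: pointer to the PF adapter structure
 *
 * Walks the PCI bus looking for VF devices that match this PF's VF device
 * ID and devfn layout. Used to detect VFs left over from a previous PF
 * driver load (for example VFs still assigned to guests) so they can be
 * re-adopted rather than re-created.
 */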
static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct pci_dev *pvfdev;
        u16 vf_devfn = 0;
        int device_id;
        int vfs_found = 0;

        switch (adapter->hw.mac.type) {
        case ixgbe_mac_82599EB:
                device_id = IXGBE_DEV_ID_82599_VF;
                break;
        case ixgbe_mac_X540:
                device_id = IXGBE_DEV_ID_X540_VF;
                break;
        default:
                device_id = 0;
                break;
        }

        vf_devfn = pdev->devfn + 0x80;
        pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
        while (pvfdev) {
                if (pvfdev->devfn == vf_devfn &&
                    (pvfdev->bus->number >= pdev->bus->number))
                        vfs_found++;
                vf_devfn += 2;
                pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
                                        device_id, pvfdev);
        }

        return vfs_found;
}

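/**
 * ixgbe_enable_sriov - enable SR-IOV and allocate per-VF resources
 * @adapter: pointer to the PF adapter structure
 * @ii: board specific info, supplies the PF-to-VF mailbox operations
 *
 * Enables SR-IOV (or adopts VFs that are already enabled), switches the
 * device into VMDq mode, and allocates the VF MACVLAN free list and the
 * per-VF data storage. If the per-VF data cannot be allocated, SR-IOV is
 * disabled again.
 */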
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
                        const struct ixgbe_info *ii)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int err = 0;
        int num_vf_macvlans, i;
        struct vf_macvlans *mv_list;
        int pre_existing_vfs = 0;

        pre_existing_vfs = ixgbe_find_enabled_vfs(adapter);
        if (!pre_existing_vfs && !adapter->num_vfs)
                return;

        /* If there are pre-existing VFs then we have to force
         * use of that many because they were not deleted the last
         * time someone removed the PF driver. That would have
         * been because they were allocated to guest VMs and can't
         * be removed. Go ahead and just re-enable the old amount.
         * If the user wants to change the number of VFs they can
         * use ethtool while making sure no VFs are allocated to
         * guest VMs... i.e. the right way.
         */
        if (pre_existing_vfs) {
                adapter->num_vfs = pre_existing_vfs;
                dev_warn(&adapter->pdev->dev, "Virtual Functions already "
                         "enabled for this device - Please reload all "
                         "VF drivers to avoid spoofed packet errors\n");
        } else {
                err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
                if (err) {
                        e_err(probe, "Failed to enable PCI sriov: %d\n", err);
                        goto err_novfs;
                }
        }

        adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
        e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);

        /* Enable VMDq flag so device will be set in VM mode */
        adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
        if (!adapter->ring_feature[RING_F_VMDQ].limit)
                adapter->ring_feature[RING_F_VMDQ].limit = 1;
        adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs;

        num_vf_macvlans = hw->mac.num_rar_entries -
                          (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);

        adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
                                             sizeof(struct vf_macvlans),
                                             GFP_KERNEL);
        if (mv_list) {
                /* Initialize list of VF macvlans */
                INIT_LIST_HEAD(&adapter->vf_mvs.l);
                for (i = 0; i < num_vf_macvlans; i++) {
                        mv_list->vf = -1;
                        mv_list->free = true;
                        mv_list->rar_entry = hw->mac.num_rar_entries -
                                (i + adapter->num_vfs + 1);
                        list_add(&mv_list->l, &adapter->vf_mvs.l);
                        mv_list++;
                }
        }

        /* If call to enable VFs succeeded then allocate memory
         * for per VF control structures.
         */
        adapter->vfinfo =
                kcalloc(adapter->num_vfs,
                        sizeof(struct vf_data_storage), GFP_KERNEL);
        if (adapter->vfinfo) {
                /* Now that we're sure SR-IOV is enabled
                 * and memory allocated set up the mailbox parameters
                 */
                ixgbe_init_mbx_params_pf(hw);
                memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));

                /* limit traffic classes based on VFs enabled */
                if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
                    (adapter->num_vfs < 16)) {
                        adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
                        adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
                } else if (adapter->num_vfs < 32) {
                        adapter->dcb_cfg.num_tcs.pg_tcs = 4;
                        adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
                } else {
                        adapter->dcb_cfg.num_tcs.pg_tcs = 1;
                        adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
                }

                /* We do not support RSS w/ SR-IOV */
                adapter->ring_feature[RING_F_RSS].limit = 1;

                /* Disable RSC when in SR-IOV mode */
                adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
                                     IXGBE_FLAG2_RSC_ENABLED);

#ifdef IXGBE_FCOE
                /*
                 * When SR-IOV is enabled 82599 cannot support jumbo frames
                 * so we must disable FCoE because we cannot support FCoE MTU.
                 */
                if (adapter->hw.mac.type == ixgbe_mac_82599EB)
                        adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED |
                                            IXGBE_FLAG_FCOE_CAPABLE);
#endif

                /* enable spoof checking for all VFs */
                for (i = 0; i < adapter->num_vfs; i++)
                        adapter->vfinfo[i].spoofchk_enabled = true;
                return;
        }

        /* Allocation of per-VF data failed, back out of SR-IOV */
        e_err(probe, "Unable to allocate memory for VF Data Storage - "
              "SRIOV disabled\n");
        pci_disable_sriov(adapter->pdev);

err_novfs:
        adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
        adapter->num_vfs = 0;
}
#endif /* #ifdef CONFIG_PCI_IOV */

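/**
 * ixgbe_disable_sriov - disable SR-IOV and release per-VF resources
 * @adapter: pointer to the PF adapter structure
 *
 * Disables SR-IOV on the PCI device, takes the hardware out of VT/VMDq
 * mode, drops the references held on the VF pci_dev structures and frees
 * the per-VF bookkeeping allocated by ixgbe_enable_sriov().
 */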
void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 gpie;
        u32 vmdctl;
        int i;

#ifdef CONFIG_PCI_IOV
        /* disable iov and allow time for transactions to clear */
        pci_disable_sriov(adapter->pdev);
#endif

        /* turn off device IOV mode */
        IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
        gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
        gpie &= ~IXGBE_GPIE_VTMODE_MASK;
        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

        /* set default pool back to 0 */
        vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
        IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
        IXGBE_WRITE_FLUSH(hw);

        /* Clear the VMDq flag if VMDq was only enabled for SR-IOV */
        if (adapter->ring_feature[RING_F_VMDQ].limit == 1)
                adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
        adapter->ring_feature[RING_F_VMDQ].offset = 0;

        /* take a breather then clean up driver data */
        msleep(100);

        /* Release reference to VF devices */
        for (i = 0; i < adapter->num_vfs; i++) {
                if (adapter->vfinfo[i].vfdev)
                        pci_dev_put(adapter->vfinfo[i].vfdev);
        }
        kfree(adapter->vfinfo);
        kfree(adapter->mv_list);
        adapter->vfinfo = NULL;

        adapter->num_vfs = 0;
        adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}

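/**
 * ixgbe_set_vf_multicasts - program multicast hash filters for a VF
 * @adapter: pointer to the PF adapter structure
 * @entries: number of multicast hash values supplied by the VF
 * @hash_list: hash values taken from the VF mailbox message
 * @vf: VF index
 *
 * Caches the VF's multicast hashes so they can be restored later and sets
 * the corresponding bits in the shared MTA hash table.
 */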
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
                                   int entries, u16 *hash_list, u32 vf)
{
        struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
        struct ixgbe_hw *hw = &adapter->hw;
        int i;
        u32 vector_bit;
        u32 vector_reg;
        u32 mta_reg;

        /* only so many hash values supported */
        entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);

        /*
         * salt away the number of multicast addresses assigned
         * to this VF for later use to restore when the PF multicast
         * list changes
         */
        vfinfo->num_vf_mc_hashes = entries;

        /*
         * VFs are limited to using the MTA hash table for their multicast
         * addresses
         */
        for (i = 0; i < entries; i++)
                vfinfo->vf_mc_hashes[i] = hash_list[i];

        for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
                vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
                vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
                mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
                mta_reg |= (1 << vector_bit);
                IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
        }

        return 0;
}

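/* Re-program any in-use VF MACVLAN filters into the RAR table */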
static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct list_head *pos;
        struct vf_macvlans *entry;

        list_for_each(pos, &adapter->vf_mvs.l) {
                entry = list_entry(pos, struct vf_macvlans, l);
                if (!entry->free)
                        hw->mac.ops.set_rar(hw, entry->rar_entry,
                                            entry->vf_macvlan,
                                            entry->vf, IXGBE_RAH_AV);
        }
}

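/**
 * ixgbe_restore_vf_multicasts - rewrite VF multicast and MACVLAN filters
 * @adapter: pointer to the PF adapter structure
 *
 * Replays each VF's cached multicast hashes into the MTA hash table and
 * then restores any VF MACVLAN RAR entries.
 */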
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct vf_data_storage *vfinfo;
        int i, j;
        u32 vector_bit;
        u32 vector_reg;
        u32 mta_reg;

        for (i = 0; i < adapter->num_vfs; i++) {
                vfinfo = &adapter->vfinfo[i];
                for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
                        hw->addr_ctrl.mta_in_use++;
                        vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
                        vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
                        mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
                        mta_reg |= (1 << vector_bit);
                        IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
                }
        }

        /* Restore any VF macvlans */
        ixgbe_restore_vf_macvlans(adapter);
}

static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
                             u32 vf)
{
        return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}

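/*
 * Handle an IXGBE_VF_SET_LPE mailbox request: the VF asks the PF to raise
 * the device-wide maximum frame size (MAXFRS) so its new MTU can be
 * received. Only honoured on X540, which supports jumbo frames in IOV mode.
 */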
static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int new_mtu = msgbuf[1];
        u32 max_frs;
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

        /* Only X540 supports jumbo frames in IOV mode */
        if (adapter->hw.mac.type != ixgbe_mac_X540)
                return;

        /* MTU < 68 is an error and causes problems on some kernels */
        if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
                e_err(drv, "VF mtu %d out of range\n", new_mtu);
                return;
        }

        max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
                   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
        if (max_frs < new_mtu) {
                max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
                IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
        }

        e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
}

static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
        u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
        vmolr |= (IXGBE_VMOLR_ROMPE |
                  IXGBE_VMOLR_BAM);
        if (aupe)
                vmolr |= IXGBE_VMOLR_AUPE;
        else
                vmolr &= ~IXGBE_VMOLR_AUPE;
        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}

static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
{
        struct ixgbe_hw *hw = &adapter->hw;

        if (vid)
                IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
                                (vid | IXGBE_VMVIR_VLANA_DEFAULT));
        else
                IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
}

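/**
 * ixgbe_vf_reset_event - restore a VF's offload defaults after a reset
 * @adapter: pointer to the PF adapter structure
 * @vf: VF index
 *
 * Re-applies any administratively set VLAN/QoS (or restores the defaults),
 * clears the VF's cached multicast hashes, refreshes the receive mode so
 * the MTA reflects the change, and clears the VF's MAC RAR entry.
 */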
static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int rar_entry = hw->mac.num_rar_entries - (vf + 1);

        /* reset offloads to defaults */
        if (adapter->vfinfo[vf].pf_vlan) {
                ixgbe_set_vf_vlan(adapter, true,
                                  adapter->vfinfo[vf].pf_vlan, vf);
                ixgbe_set_vmvir(adapter,
                                (adapter->vfinfo[vf].pf_vlan |
                                 (adapter->vfinfo[vf].pf_qos <<
                                  VLAN_PRIO_SHIFT)), vf);
                ixgbe_set_vmolr(hw, vf, false);
        } else {
                ixgbe_set_vmvir(adapter, 0, vf);
                ixgbe_set_vmolr(hw, vf, true);
        }

        /* reset multicast table array for vf */
        adapter->vfinfo[vf].num_vf_mc_hashes = 0;

        /* Flush and reset the mta with the new values */
        ixgbe_set_rx_mode(adapter->netdev);

        hw->mac.ops.clear_rar(hw, rar_entry);
}

static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
                            int vf, unsigned char *mac_addr)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int rar_entry = hw->mac.num_rar_entries - (vf + 1);

        memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
        hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);

        return 0;
}

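/**
 * ixgbe_set_vf_macvlan - add or clear a MACVLAN filter on behalf of a VF
 * @adapter: pointer to the PF adapter structure
 * @vf: VF index
 * @index: 0 clears the VF's filters, 1 clears them and adds @mac_addr,
 *         greater than 1 adds another filter
 * @mac_addr: MAC address for the filter when one is being added
 *
 * Filters are carved out of the shared RAR table using the free list set
 * up in ixgbe_enable_sriov(); returns -ENOSPC when no entry is available.
 */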
static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
                                int vf, int index, unsigned char *mac_addr)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct list_head *pos;
        struct vf_macvlans *entry;

        if (index <= 1) {
                list_for_each(pos, &adapter->vf_mvs.l) {
                        entry = list_entry(pos, struct vf_macvlans, l);
                        if (entry->vf == vf) {
                                entry->vf = -1;
                                entry->free = true;
                                entry->is_macvlan = false;
                                hw->mac.ops.clear_rar(hw, entry->rar_entry);
                        }
                }
        }

        /*
         * If index was zero then we were asked to clear the uc list
         * for the VF. We're done.
         */
        if (!index)
                return 0;

        entry = NULL;

        list_for_each(pos, &adapter->vf_mvs.l) {
                entry = list_entry(pos, struct vf_macvlans, l);
                if (entry->free)
                        break;
        }

        /*
         * If we traversed the entire list and didn't find a free entry
         * then we're out of space on the RAR table. Also entry may
         * be NULL because the original memory allocation for the list
         * failed, which is not fatal but does mean we can't support
         * VF requests for MACVLAN because we couldn't allocate
         * memory for the list management required.
         */
        if (!entry || !entry->free)
                return -ENOSPC;

        entry->free = false;
        entry->is_macvlan = true;
        entry->vf = vf;
        memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);

        hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);

        return 0;
}

int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
        int i;
        for (i = 0; i < adapter->num_vfs; i++) {
                if (adapter->vfinfo[i].vfdev->dev_flags &
                    PCI_DEV_FLAGS_ASSIGNED)
                        return true;
        }
#endif
        return false;
}

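/**
 * ixgbe_vf_configuration - bring-up hook for a newly enabled VF
 * @pdev: PF PCI device
 * @event_mask: low 6 bits carry the VF number, bit 28 indicates enable
 *
 * Generates a random "permanent" MAC address for the VF, stores it for the
 * later IXGBE_VF_RESET mailbox exchange, and caches a reference to the VF's
 * pci_dev so assignment to a guest can be detected.
 */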
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
        unsigned char vf_mac_addr[6];
        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        unsigned int vfn = (event_mask & 0x3f);
        struct pci_dev *pvfdev;
        unsigned int device_id;
        u16 thisvf_devfn = (pdev->devfn + 0x80 + (vfn << 1)) |
                           (pdev->devfn & 1);

        bool enable = ((event_mask & 0x10000000U) != 0);

        if (enable) {
                eth_random_addr(vf_mac_addr);
                e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
                       vfn, vf_mac_addr);
                /*
                 * Store away the VF "permanent" MAC address; the VF will
                 * ask for it later.
                 */
                memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);

                switch (adapter->hw.mac.type) {
                case ixgbe_mac_82599EB:
                        device_id = IXGBE_DEV_ID_82599_VF;
                        break;
                case ixgbe_mac_X540:
                        device_id = IXGBE_DEV_ID_X540_VF;
                        break;
                default:
                        device_id = 0;
                        break;
                }

                pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
                while (pvfdev) {
                        if (pvfdev->devfn == thisvf_devfn)
                                break;
                        pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
                                                device_id, pvfdev);
                }
                if (pvfdev)
                        adapter->vfinfo[vfn].vfdev = pvfdev;
                else
                        e_err(drv, "Couldn't find pci dev ptr for VF %4.4x\n",
                              thisvf_devfn);
        }

        return 0;
}

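/**
 * ixgbe_vf_reset_msg - complete a VF-requested reset
 * @adapter: pointer to the PF adapter structure
 * @vf: VF index
 *
 * Re-enables the VF's transmit and receive queues, turns on spoofed-packet
 * counting for the VF, and restores its filter defaults via
 * ixgbe_vf_reset_event().
 */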
static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 reg;
        u32 reg_offset, vf_shift;

        vf_shift = vf % 32;
        reg_offset = vf / 32;

        /* enable transmit and receive for vf */
        reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
        reg |= (1 << vf_shift);
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);

        reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
        reg |= (1 << vf_shift);
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);

        /* Enable counting of spoofed packets in the SSVPC register */
        reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
        reg |= (1 << vf_shift);
        IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

        ixgbe_vf_reset_event(adapter, vf);
}

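/**
 * ixgbe_rcv_msg_from_vf - read and dispatch one VF mailbox message
 * @adapter: pointer to the PF adapter structure
 * @vf: VF index
 *
 * Handles the VF reset handshake and the SET_MAC_ADDR, SET_MULTICAST,
 * SET_LPE, SET_VLAN and SET_MACVLAN requests, enforcing any values the PF
 * administrator has pinned, then ACKs or NACKs the message back to the VF.
 */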
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
        u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
        u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
        struct ixgbe_hw *hw = &adapter->hw;
        s32 retval;
        int entries;
        u16 *hash_list;
        int add, vid, index;
        u8 *new_mac;

        retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);

        if (retval) {
                pr_err("Error receiving message from VF\n");
                return retval;
        }

        /* this is a message we already processed, do nothing */
        if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
                return retval;

        /* flush the ack before we write any messages back */
        IXGBE_WRITE_FLUSH(hw);

        /*
         * until the vf completes a virtual function reset it should not be
         * allowed to start any configuration.
         */

        if (msgbuf[0] == IXGBE_VF_RESET) {
                unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
                new_mac = (u8 *)(&msgbuf[1]);
                e_info(probe, "VF Reset msg received from vf %d\n", vf);
                adapter->vfinfo[vf].clear_to_send = false;
                ixgbe_vf_reset_msg(adapter, vf);
                adapter->vfinfo[vf].clear_to_send = true;

                if (is_valid_ether_addr(new_mac) &&
                    !adapter->vfinfo[vf].pf_set_mac)
                        ixgbe_set_vf_mac(adapter, vf, vf_mac);
                else
                        ixgbe_set_vf_mac(adapter,
                                vf, adapter->vfinfo[vf].vf_mac_addresses);

                /* reply to reset with ack and vf mac address */
                msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
                memcpy(new_mac, vf_mac, ETH_ALEN);
                /*
                 * Piggyback the multicast filter type so VF can compute the
                 * correct vectors
                 */
                msgbuf[3] = hw->mac.mc_filter_type;
                ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

                return retval;
        }

        if (!adapter->vfinfo[vf].clear_to_send) {
                msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
                ixgbe_write_mbx(hw, msgbuf, 1, vf);
                return retval;
        }

        switch ((msgbuf[0] & 0xFFFF)) {
        case IXGBE_VF_SET_MAC_ADDR:
                new_mac = ((u8 *)(&msgbuf[1]));
                if (is_valid_ether_addr(new_mac) &&
                    !adapter->vfinfo[vf].pf_set_mac) {
                        ixgbe_set_vf_mac(adapter, vf, new_mac);
                } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
                                  new_mac, ETH_ALEN)) {
                        e_warn(drv, "VF %d attempted to override "
                               "administratively set MAC address\nReload "
                               "the VF driver to resume operations\n", vf);
                        retval = -1;
                }
                break;
        case IXGBE_VF_SET_MULTICAST:
                entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
                          >> IXGBE_VT_MSGINFO_SHIFT;
                hash_list = (u16 *)&msgbuf[1];
                retval = ixgbe_set_vf_multicasts(adapter, entries,
                                                 hash_list, vf);
                break;
        case IXGBE_VF_SET_LPE:
                ixgbe_set_vf_lpe(adapter, msgbuf);
                break;
        case IXGBE_VF_SET_VLAN:
                add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
                      >> IXGBE_VT_MSGINFO_SHIFT;
                vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
                if (adapter->vfinfo[vf].pf_vlan) {
                        e_warn(drv, "VF %d attempted to override "
                               "administratively set VLAN configuration\n"
                               "Reload the VF driver to resume operations\n",
                               vf);
                        retval = -1;
                } else {
                        if (add)
                                adapter->vfinfo[vf].vlan_count++;
                        else if (adapter->vfinfo[vf].vlan_count)
                                adapter->vfinfo[vf].vlan_count--;
                        retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
                        if (!retval && adapter->vfinfo[vf].spoofchk_enabled)
                                hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
                }
                break;
        case IXGBE_VF_SET_MACVLAN:
                index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
                        IXGBE_VT_MSGINFO_SHIFT;
                if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
                        e_warn(drv, "VF %d requested MACVLAN filter but is "
                               "administratively denied\n", vf);
                        retval = -1;
                        break;
                }
                /*
                 * If the VF is allowed to set MAC filters then turn off
                 * anti-spoofing to avoid false positives. An index
                 * greater than 0 will indicate the VF is setting a
                 * macvlan MAC filter.
                 */
                if (index > 0 && adapter->vfinfo[vf].spoofchk_enabled)
                        ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
                retval = ixgbe_set_vf_macvlan(adapter, vf, index,
                                              (unsigned char *)(&msgbuf[1]));
                if (retval == -ENOSPC)
                        e_warn(drv, "VF %d has requested a MACVLAN filter "
                               "but there is no space for it\n", vf);
                break;
        default:
                e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
                retval = IXGBE_ERR_MBX;
                break;
        }

        /* notify the VF of the results of what it sent us */
        if (retval)
                msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
        else
                msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;

        msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;

        ixgbe_write_mbx(hw, msgbuf, 1, vf);

        return retval;
}

static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 msg = IXGBE_VT_MSGTYPE_NACK;

        /* if device isn't clear to send it shouldn't be reading either */
        if (!adapter->vfinfo[vf].clear_to_send)
                ixgbe_write_mbx(hw, &msg, 1, vf);
}

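/**
 * ixgbe_msg_task - service pending mailbox events from all VFs
 * @adapter: pointer to the PF adapter structure
 *
 * Called from the PF driver's service path; handles any outstanding reset
 * requests, messages and acks for each VF in turn.
 */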
void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 vf;

        for (vf = 0; vf < adapter->num_vfs; vf++) {
                /* process any reset requests */
                if (!ixgbe_check_for_rst(hw, vf))
                        ixgbe_vf_reset_event(adapter, vf);

                /* process any messages pending */
                if (!ixgbe_check_for_msg(hw, vf))
                        ixgbe_rcv_msg_from_vf(adapter, vf);

                /* process any acks */
                if (!ixgbe_check_for_ack(hw, vf))
                        ixgbe_rcv_ack_from_vf(adapter, vf);
        }
}

void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        /* disable transmit and receive for all vfs */
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);

        IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
}

void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 ping;
        int i;

        for (i = 0; i < adapter->num_vfs; i++) {
                ping = IXGBE_PF_CONTROL_MSG;
                if (adapter->vfinfo[i].clear_to_send)
                        ping |= IXGBE_VT_MSGTYPE_CTS;
                ixgbe_write_mbx(hw, &ping, 1, i);
        }
}

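/* .ndo_set_vf_mac handler: administratively pin a VF's MAC address */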
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
                return -EINVAL;
        adapter->vfinfo[vf].pf_set_mac = true;
        dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
        dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
                 " change effective.");
        if (test_bit(__IXGBE_DOWN, &adapter->state)) {
                dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
                         " but the PF device is not up.\n");
                dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
                         " attempting to use the VF device.\n");
        }
        return ixgbe_set_vf_mac(adapter, vf, mac);
}

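/* .ndo_set_vf_vlan handler: set or clear an administrative VLAN/QoS for a VF */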
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
        int err = 0;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;

        if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
                return -EINVAL;
        if (vlan || qos) {
                err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
                if (err)
                        goto out;
                ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
                ixgbe_set_vmolr(hw, vf, false);
                if (adapter->vfinfo[vf].spoofchk_enabled)
                        hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
                adapter->vfinfo[vf].vlan_count++;
                adapter->vfinfo[vf].pf_vlan = vlan;
                adapter->vfinfo[vf].pf_qos = qos;
                dev_info(&adapter->pdev->dev,
                         "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
                if (test_bit(__IXGBE_DOWN, &adapter->state)) {
                        dev_warn(&adapter->pdev->dev,
                                 "The VF VLAN has been set,"
                                 " but the PF device is not up.\n");
                        dev_warn(&adapter->pdev->dev,
                                 "Bring the PF device up before"
                                 " attempting to use the VF device.\n");
                }
        } else {
                err = ixgbe_set_vf_vlan(adapter, false,
                                        adapter->vfinfo[vf].pf_vlan, vf);
                ixgbe_set_vmvir(adapter, vlan, vf);
                ixgbe_set_vmolr(hw, vf, true);
                hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
                if (adapter->vfinfo[vf].vlan_count)
                        adapter->vfinfo[vf].vlan_count--;
                adapter->vfinfo[vf].pf_vlan = 0;
                adapter->vfinfo[vf].pf_qos = 0;
        }
out:
        return err;
}

static int ixgbe_link_mbps(int internal_link_speed)
{
        switch (internal_link_speed) {
        case IXGBE_LINK_SPEED_100_FULL:
                return 100;
        case IXGBE_LINK_SPEED_1GB_FULL:
                return 1000;
        case IXGBE_LINK_SPEED_10GB_FULL:
                return 10000;
        default:
                return 0;
        }
}

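/*
 * Program the per-VF Tx rate limiter. The RTTBCNRC rate factor is the
 * ratio link_speed / tx_rate expressed as a fixed-point number: rf_int
 * holds the integer part and rf_dec the fraction scaled by
 * 2^IXGBE_RTTBCNRC_RF_INT_SHIFT. A tx_rate of 0 disables the limiter.
 */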
static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
                                    int link_speed)
{
        int rf_dec, rf_int;
        u32 bcnrc_val;

        if (tx_rate != 0) {
                /* Calculate the rate factor values to set */
                rf_int = link_speed / tx_rate;
                rf_dec = (link_speed - (rf_int * tx_rate));
                rf_dec = (rf_dec * (1 << IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

                bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
                bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
                              IXGBE_RTTBCNRC_RF_INT_MASK);
                bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
        } else {
                bcnrc_val = 0;
        }

        IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2 * vf); /* vf Y uses queue 2*Y */
        /*
         * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
         * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
         * and 0x004 otherwise.
         */
        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
                IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
                break;
        case ixgbe_mac_X540:
                IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
                break;
        default:
                break;
        }

        IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
}

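/*
 * Re-validate the per-VF Tx rate limits when the PF link speed changes;
 * limits are cleared if they were configured for a different link speed.
 */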
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
{
        int actual_link_speed, i;
        bool reset_rate = false;

        /* VF Tx rate limit was not set */
        if (adapter->vf_rate_link_speed == 0)
                return;

        actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
        if (actual_link_speed != adapter->vf_rate_link_speed) {
                reset_rate = true;
                adapter->vf_rate_link_speed = 0;
                dev_info(&adapter->pdev->dev,
                         "Link speed has been changed. VF Transmit rate "
                         "is disabled\n");
        }

        for (i = 0; i < adapter->num_vfs; i++) {
                if (reset_rate)
                        adapter->vfinfo[i].tx_rate = 0;

                ixgbe_set_vf_rate_limit(&adapter->hw, i,
                                        adapter->vfinfo[i].tx_rate,
                                        actual_link_speed);
        }
}

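/*
 * ndo handler for setting a VF Tx rate limit: limits can only be applied
 * while the link is up at 10Gb/s, and a non-zero limit must exceed 10Mb/s.
 */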
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        int actual_link_speed;

        actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
        if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
            (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
            ((tx_rate != 0) && (tx_rate <= 10)))
                /* rate limit cannot be set to 10Mb or less in 10Gb adapters */
                return -EINVAL;

        adapter->vf_rate_link_speed = actual_link_speed;
        adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
        ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

        return 0;
}

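/*
 * .ndo_set_vf_spoofchk handler: toggle MAC (and, when a VLAN is in use,
 * VLAN) anti-spoof checking for one VF in the PFVFSPOOF registers.
 */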
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        int vf_target_reg = vf >> 3;
        int vf_target_shift = vf % 8;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 regval;

        adapter->vfinfo[vf].spoofchk_enabled = setting;

        regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
        regval &= ~(1 << vf_target_shift);
        regval |= (setting << vf_target_shift);
        IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);

        if (adapter->vfinfo[vf].vlan_count) {
                vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT;
                regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
                regval &= ~(1 << vf_target_shift);
                regval |= (setting << vf_target_shift);
                IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
        }

        return 0;
}

int ixgbe_ndo_get_vf_config(struct net_device *netdev,
                            int vf, struct ifla_vf_info *ivi)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        if (vf >= adapter->num_vfs)
                return -EINVAL;
        ivi->vf = vf;
        memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
        ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
        ivi->vlan = adapter->vfinfo[vf].pf_vlan;
        ivi->qos = adapter->vfinfo[vf].pf_qos;
        ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
        return 0;
}