/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif

#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_PCI_IOV
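/*
 * Count VF devices left enabled by a previous load of the PF driver by
 * walking the PCI bus for the matching VF device ID.  VF routing IDs
 * start at the PF's devfn + 0x80 and step with a stride of two.
 */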
static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pci_dev *pvfdev;
	u16 vf_devfn = 0;
	int device_id;
	int vfs_found = 0;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		device_id = IXGBE_DEV_ID_82599_VF;
		break;
	case ixgbe_mac_X540:
		device_id = IXGBE_DEV_ID_X540_VF;
		break;
	default:
		device_id = 0;
		break;
	}

	vf_devfn = pdev->devfn + 0x80;
	pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
	while (pvfdev) {
		if (pvfdev->devfn == vf_devfn &&
		    (pvfdev->bus->number >= pdev->bus->number))
			vfs_found++;
		vf_devfn += 2;
		pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
					device_id, pvfdev);
	}

	return vfs_found;
}

void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
			const struct ixgbe_info *ii)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err = 0;
	int num_vf_macvlans, i;
	struct vf_macvlans *mv_list;
	int pre_existing_vfs = 0;

	pre_existing_vfs = ixgbe_find_enabled_vfs(adapter);
	if (!pre_existing_vfs && !adapter->num_vfs)
		return;

	/* If there are pre-existing VFs then we have to force
	 * use of that many because they were not deleted the last
	 * time someone removed the PF driver.  That would have
	 * been because they were allocated to guest VMs and can't
	 * be removed.  Go ahead and just re-enable the old amount.
	 * If the user wants to change the number of VFs they can
	 * use ethtool while making sure no VFs are allocated to
	 * guest VMs... i.e. the right way.
	 */
	if (pre_existing_vfs) {
		adapter->num_vfs = pre_existing_vfs;
		dev_warn(&adapter->pdev->dev, "Virtual Functions already "
			 "enabled for this device - Please reload all "
			 "VF drivers to avoid spoofed packet errors\n");
	} else {
		err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (err) {
			e_err(probe, "Failed to enable PCI sriov: %d\n", err);
			goto err_novfs;
		}
	}

	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
	e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);

	/* Enable VMDq flag so device will be set in VM mode */
	adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
	if (!adapter->ring_feature[RING_F_VMDQ].limit)
		adapter->ring_feature[RING_F_VMDQ].limit = 1;
	adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs;

	num_vf_macvlans = hw->mac.num_rar_entries -
			  (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);

	adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
					     sizeof(struct vf_macvlans),
					     GFP_KERNEL);
	if (mv_list) {
		/* Initialize list of VF macvlans */
		INIT_LIST_HEAD(&adapter->vf_mvs.l);
		for (i = 0; i < num_vf_macvlans; i++) {
			mv_list->vf = -1;
			mv_list->free = true;
			mv_list->rar_entry = hw->mac.num_rar_entries -
				(i + adapter->num_vfs + 1);
			list_add(&mv_list->l, &adapter->vf_mvs.l);
			mv_list++;
		}
	}

	/* If call to enable VFs succeeded then allocate memory
	 * for per VF control structures.
	 */
	adapter->vfinfo =
		kcalloc(adapter->num_vfs,
			sizeof(struct vf_data_storage), GFP_KERNEL);
	if (adapter->vfinfo) {
		/* Now that we're sure SR-IOV is enabled
		 * and memory allocated set up the mailbox parameters
		 */
		ixgbe_init_mbx_params_pf(hw);
		memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));

		/* limit traffic classes based on VFs enabled */
		if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
		    (adapter->num_vfs < 16)) {
			adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
			adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
		} else if (adapter->num_vfs < 32) {
			adapter->dcb_cfg.num_tcs.pg_tcs = 4;
			adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
		} else {
			adapter->dcb_cfg.num_tcs.pg_tcs = 1;
			adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
		}

		/* We do not support RSS w/ SR-IOV */
		adapter->ring_feature[RING_F_RSS].limit = 1;

		/* Disable RSC when in SR-IOV mode */
		adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
				     IXGBE_FLAG2_RSC_ENABLED);

#ifdef IXGBE_FCOE
		/*
		 * When SR-IOV is enabled 82599 cannot support jumbo frames
		 * so we must disable FCoE because we cannot support FCoE MTU.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED |
					    IXGBE_FLAG_FCOE_CAPABLE);
#endif

		/* enable spoof checking for all VFs */
		for (i = 0; i < adapter->num_vfs; i++)
			adapter->vfinfo[i].spoofchk_enabled = true;
		return;
	}

	/* Oh oh */
	e_err(probe, "Unable to allocate memory for VF Data Storage - "
	      "SRIOV disabled\n");
	pci_disable_sriov(adapter->pdev);

err_novfs:
	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
	adapter->num_vfs = 0;
}
#endif /* #ifdef CONFIG_PCI_IOV */

void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gpie;
	u32 vmdctl;
	int i;

	/* set num VFs to 0 to prevent access to vfinfo */
	adapter->num_vfs = 0;

	/* free VF control structures */
	kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;

	/* free macvlan list */
	kfree(adapter->mv_list);
	adapter->mv_list = NULL;

#ifdef CONFIG_PCI_IOV
	/* disable iov and allow time for transactions to clear */
	pci_disable_sriov(adapter->pdev);
#endif

	/* turn off device IOV mode */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* set default pool back to 0 */
	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
	IXGBE_WRITE_FLUSH(hw);

	/* Clear the VMDq flag so the device drops back out of VM mode */
	if (adapter->ring_feature[RING_F_VMDQ].limit == 1)
		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
	adapter->ring_feature[RING_F_VMDQ].offset = 0;

	/* take a breather then clean up driver data */
	msleep(100);

	/* Release reference to VF devices */
	for (i = 0; i < adapter->num_vfs; i++) {
		if (adapter->vfinfo[i].vfdev)
			pci_dev_put(adapter->vfinfo[i].vfdev);
	}

	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}

static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
				   int entries, u16 *hash_list, u32 vf)
{
	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
	struct ixgbe_hw *hw = &adapter->hw;
	int i;
	u32 vector_bit;
	u32 vector_reg;
	u32 mta_reg;

	/* only so many hash values supported */
	entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);

	/*
	 * salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vfinfo->num_vf_mc_hashes = entries;

	/*
	 * VFs are limited to using the MTA hash table for their multicast
	 * addresses
	 */
	for (i = 0; i < entries; i++) {
		vfinfo->vf_mc_hashes[i] = hash_list[i];
	}

	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
		vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
		vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
		mta_reg |= (1 << vector_bit);
		IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
	}

	return 0;
}

static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct list_head *pos;
	struct vf_macvlans *entry;

	list_for_each(pos, &adapter->vf_mvs.l) {
		entry = list_entry(pos, struct vf_macvlans, l);
		if (!entry->free)
			hw->mac.ops.set_rar(hw, entry->rar_entry,
					    entry->vf_macvlan,
					    entry->vf, IXGBE_RAH_AV);
	}
}

void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct vf_data_storage *vfinfo;
	int i, j;
	u32 vector_bit;
	u32 vector_reg;
	u32 mta_reg;

	for (i = 0; i < adapter->num_vfs; i++) {
		vfinfo = &adapter->vfinfo[i];
		for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
			hw->addr_ctrl.mta_in_use++;
			vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
			vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
			mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
			mta_reg |= (1 << vector_bit);
			IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
		}
	}

	/* Restore any VF macvlans */
	ixgbe_restore_vf_macvlans(adapter);
}

static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
			     u32 vf)
{
	return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}

static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int new_mtu = msgbuf[1];
	u32 max_frs;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* Only X540 supports jumbo frames in IOV mode */
	if (adapter->hw.mac.type != ixgbe_mac_X540)
		return;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
		e_err(drv, "VF mtu %d out of range\n", new_mtu);
		return;
	}

	max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
		   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
	if (max_frs < new_mtu) {
		max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
	}

	e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
}

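/*
 * VMOLR controls per-pool receive behaviour: ROMPE accepts multicast that
 * hits the MTA, BAM accepts broadcast, and AUPE accepts untagged packets
 * (cleared when the VF has an administratively set port VLAN).
 */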
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
	vmolr |= (IXGBE_VMOLR_ROMPE |
		  IXGBE_VMOLR_BAM);
	if (aupe)
		vmolr |= IXGBE_VMOLR_AUPE;
	else
		vmolr &= ~IXGBE_VMOLR_AUPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}

static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (vid)
		IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
				(vid | IXGBE_VMVIR_VLANA_DEFAULT));
	else
		IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
}

static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);

	/* reset offloads to defaults */
	if (adapter->vfinfo[vf].pf_vlan) {
		ixgbe_set_vf_vlan(adapter, true,
				  adapter->vfinfo[vf].pf_vlan, vf);
		ixgbe_set_vmvir(adapter,
				(adapter->vfinfo[vf].pf_vlan |
				 (adapter->vfinfo[vf].pf_qos <<
				  VLAN_PRIO_SHIFT)), vf);
		ixgbe_set_vmolr(hw, vf, false);
	} else {
		ixgbe_set_vmvir(adapter, 0, vf);
		ixgbe_set_vmolr(hw, vf, true);
	}

	/* reset multicast table array for vf */
	adapter->vfinfo[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	ixgbe_set_rx_mode(adapter->netdev);

	hw->mac.ops.clear_rar(hw, rar_entry);
}

static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
			    int vf, unsigned char *mac_addr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);

	memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
	hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);

	return 0;
}

static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
				int vf, int index, unsigned char *mac_addr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct list_head *pos;
	struct vf_macvlans *entry;

	if (index <= 1) {
		list_for_each(pos, &adapter->vf_mvs.l) {
			entry = list_entry(pos, struct vf_macvlans, l);
			if (entry->vf == vf) {
				entry->vf = -1;
				entry->free = true;
				entry->is_macvlan = false;
				hw->mac.ops.clear_rar(hw, entry->rar_entry);
			}
		}
	}

	/*
	 * If index was zero then we were asked to clear the uc list
	 * for the VF.  We're done.
	 */
	if (!index)
		return 0;

	entry = NULL;

	list_for_each(pos, &adapter->vf_mvs.l) {
		entry = list_entry(pos, struct vf_macvlans, l);
		if (entry->free)
			break;
	}

	/*
	 * If we traversed the entire list and didn't find a free entry
	 * then we're out of space on the RAR table.  Also entry may
	 * be NULL because the original memory allocation for the list
	 * failed, which is not fatal but does mean we can't support
	 * VF requests for MACVLAN because we couldn't allocate
	 * memory for the list management required.
	 */
	if (!entry || !entry->free)
		return -ENOSPC;

	entry->free = false;
	entry->is_macvlan = true;
	entry->vf = vf;
	memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);

	hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);

	return 0;
}

int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	int i;
	for (i = 0; i < adapter->num_vfs; i++) {
		if (adapter->vfinfo[i].vfdev->dev_flags &
		    PCI_DEV_FLAGS_ASSIGNED)
			return true;
	}
#endif
	return false;
}

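/*
 * event_mask encodes the VF number in its low six bits and signals
 * "VF enabled" in bit 28; both fields are decoded below.
 */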
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
	unsigned char vf_mac_addr[6];
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	unsigned int vfn = (event_mask & 0x3f);
	struct pci_dev *pvfdev;
	unsigned int device_id;
	u16 thisvf_devfn = (pdev->devfn + 0x80 + (vfn << 1)) |
			   (pdev->devfn & 1);

	bool enable = ((event_mask & 0x10000000U) != 0);

	if (enable) {
		eth_random_addr(vf_mac_addr);
		e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
		       vfn, vf_mac_addr);
		/*
		 * Store away the VF "permanent" MAC address; it will ask
		 * for it later.
		 */
		memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);

		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			device_id = IXGBE_DEV_ID_82599_VF;
			break;
		case ixgbe_mac_X540:
			device_id = IXGBE_DEV_ID_X540_VF;
			break;
		default:
			device_id = 0;
			break;
		}

		pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
		while (pvfdev) {
			if (pvfdev->devfn == thisvf_devfn)
				break;
			pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
						device_id, pvfdev);
		}
		if (pvfdev)
			adapter->vfinfo[vfn].vfdev = pvfdev;
		else
			e_err(drv, "Couldn't find pci dev ptr for VF %4.4x\n",
			      thisvf_devfn);
	}

	return 0;
}

static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;
	u32 reg_offset, vf_shift;

	vf_shift = vf % 32;
	reg_offset = vf / 32;

	/* enable transmit and receive for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
	reg |= (1 << vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);

	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
	reg |= (1 << vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);

	/* Enable counting of spoofed packets in the SSVPC register */
	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
	reg |= (1 << vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

	ixgbe_vf_reset_event(adapter, vf);
}

static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
	u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	struct ixgbe_hw *hw = &adapter->hw;
	s32 retval;
	int entries;
	u16 *hash_list;
	int add, vid, index;
	u8 *new_mac;

	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);

	if (retval) {
		pr_err("Error receiving message from VF\n");
		return retval;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
		return retval;

	/* flush the ack before we write any messages back */
	IXGBE_WRITE_FLUSH(hw);

	/*
	 * until the vf completes a virtual function reset it should not be
	 * allowed to start any configuration.
	 */

	if (msgbuf[0] == IXGBE_VF_RESET) {
		unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
		new_mac = (u8 *)(&msgbuf[1]);
		e_info(probe, "VF Reset msg received from vf %d\n", vf);
		adapter->vfinfo[vf].clear_to_send = false;
		ixgbe_vf_reset_msg(adapter, vf);
		adapter->vfinfo[vf].clear_to_send = true;

		if (is_valid_ether_addr(new_mac) &&
		    !adapter->vfinfo[vf].pf_set_mac)
			ixgbe_set_vf_mac(adapter, vf, vf_mac);
		else
			ixgbe_set_vf_mac(adapter,
				vf, adapter->vfinfo[vf].vf_mac_addresses);

		/* reply to reset with ack and vf mac address */
		msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
		memcpy(new_mac, vf_mac, ETH_ALEN);
		/*
		 * Piggyback the multicast filter type so VF can compute the
		 * correct vectors
		 */
		msgbuf[3] = hw->mac.mc_filter_type;
		ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

		return retval;
	}

	if (!adapter->vfinfo[vf].clear_to_send) {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
		ixgbe_write_mbx(hw, msgbuf, 1, vf);
		return retval;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case IXGBE_VF_SET_MAC_ADDR:
		new_mac = ((u8 *)(&msgbuf[1]));
		if (is_valid_ether_addr(new_mac) &&
		    !adapter->vfinfo[vf].pf_set_mac) {
			ixgbe_set_vf_mac(adapter, vf, new_mac);
		} else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
				  new_mac, ETH_ALEN)) {
			e_warn(drv, "VF %d attempted to override "
			       "administratively set MAC address\nReload "
			       "the VF driver to resume operations\n", vf);
			retval = -1;
		}
		break;
	case IXGBE_VF_SET_MULTICAST:
		entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
			  >> IXGBE_VT_MSGINFO_SHIFT;
		hash_list = (u16 *)&msgbuf[1];
		retval = ixgbe_set_vf_multicasts(adapter, entries,
						 hash_list, vf);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_set_vf_lpe(adapter, msgbuf);
		break;
	case IXGBE_VF_SET_VLAN:
		add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
		      >> IXGBE_VT_MSGINFO_SHIFT;
		vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
		if (adapter->vfinfo[vf].pf_vlan) {
			e_warn(drv, "VF %d attempted to override "
			       "administratively set VLAN configuration\n"
			       "Reload the VF driver to resume operations\n",
			       vf);
			retval = -1;
		} else {
			if (add)
				adapter->vfinfo[vf].vlan_count++;
			else if (adapter->vfinfo[vf].vlan_count)
				adapter->vfinfo[vf].vlan_count--;
			retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
			if (!retval && adapter->vfinfo[vf].spoofchk_enabled)
				hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
		}
		break;
	case IXGBE_VF_SET_MACVLAN:
		index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
			IXGBE_VT_MSGINFO_SHIFT;
		if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
			e_warn(drv, "VF %d requested MACVLAN filter but is "
			       "administratively denied\n", vf);
			retval = -1;
			break;
		}
		/*
		 * If the VF is allowed to set MAC filters then turn off
		 * anti-spoofing to avoid false positives.  An index
		 * greater than 0 will indicate the VF is setting a
		 * macvlan MAC filter.
		 */
		if (index > 0 && adapter->vfinfo[vf].spoofchk_enabled)
			ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
		retval = ixgbe_set_vf_macvlan(adapter, vf, index,
					      (unsigned char *)(&msgbuf[1]));
		if (retval == -ENOSPC)
			e_warn(drv, "VF %d has requested a MACVLAN filter "
			       "but there is no space for it\n", vf);
		break;
	default:
		e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
		retval = IXGBE_ERR_MBX;
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;

	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(hw, msgbuf, 1, vf);

	return retval;
}

static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 msg = IXGBE_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!adapter->vfinfo[vf].clear_to_send)
		ixgbe_write_mbx(hw, &msg, 1, vf);
}

void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->num_vfs; vf++) {
		/* process any reset requests */
		if (!ixgbe_check_for_rst(hw, vf))
			ixgbe_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!ixgbe_check_for_msg(hw, vf))
			ixgbe_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!ixgbe_check_for_ack(hw, vf))
			ixgbe_rcv_ack_from_vf(adapter, vf);
	}
}

void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* disable transmit and receive for all vfs */
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
}

void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->num_vfs; i++) {
		ping = IXGBE_PF_CONTROL_MSG;
		if (adapter->vfinfo[i].clear_to_send)
			ping |= IXGBE_VT_MSGTYPE_CTS;
		ixgbe_write_mbx(hw, &ping, 1, i);
	}
}

int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
		return -EINVAL;
	adapter->vfinfo[vf].pf_set_mac = true;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
				      " change effective.");
	if (test_bit(__IXGBE_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
			 " but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
			 " attempting to use the VF device.\n");
	}
	return ixgbe_set_vf_mac(adapter, vf, mac);
}

int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan || qos) {
		err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
		if (err)
			goto out;
		ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
		ixgbe_set_vmolr(hw, vf, false);
		if (adapter->vfinfo[vf].spoofchk_enabled)
			hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
		adapter->vfinfo[vf].vlan_count++;
		adapter->vfinfo[vf].pf_vlan = vlan;
		adapter->vfinfo[vf].pf_qos = qos;
		dev_info(&adapter->pdev->dev,
			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
		if (test_bit(__IXGBE_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF VLAN has been set,"
				 " but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before"
				 " attempting to use the VF device.\n");
		}
	} else {
		err = ixgbe_set_vf_vlan(adapter, false,
					adapter->vfinfo[vf].pf_vlan, vf);
		ixgbe_set_vmvir(adapter, vlan, vf);
		ixgbe_set_vmolr(hw, vf, true);
		hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
		if (adapter->vfinfo[vf].vlan_count)
			adapter->vfinfo[vf].vlan_count--;
		adapter->vfinfo[vf].pf_vlan = 0;
		adapter->vfinfo[vf].pf_qos = 0;
	}
out:
	return err;
}

static int ixgbe_link_mbps(int internal_link_speed)
{
	switch (internal_link_speed) {
	case IXGBE_LINK_SPEED_100_FULL:
		return 100;
	case IXGBE_LINK_SPEED_1GB_FULL:
		return 1000;
	case IXGBE_LINK_SPEED_10GB_FULL:
		return 10000;
	default:
		return 0;
	}
}

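/*
 * Select the VF's transmit queue via RTTDQSEL and program RTTBCNRC with a
 * rate factor of link_speed / tx_rate, split into integer and fractional
 * parts; a value of zero disables rate limiting for that queue.
 */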
static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
				    int link_speed)
{
	int rf_dec, rf_int;
	u32 bcnrc_val;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
		rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

		bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
		bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) &
			       IXGBE_RTTBCNRC_RF_INT_MASK);
		bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}

	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
	/*
	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
	 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
	 * and 0x004 otherwise.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
		break;
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
		break;
	default:
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
}

void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
{
	int actual_link_speed, i;
	bool reset_rate = false;

	/* VF Tx rate limit was not set */
	if (adapter->vf_rate_link_speed == 0)
		return;

	actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
	if (actual_link_speed != adapter->vf_rate_link_speed) {
		reset_rate = true;
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit rate "
			 "is disabled\n");
	}

	for (i = 0; i < adapter->num_vfs; i++) {
		if (reset_rate)
			adapter->vfinfo[i].tx_rate = 0;

		ixgbe_set_vf_rate_limit(&adapter->hw, i,
					adapter->vfinfo[i].tx_rate,
					actual_link_speed);
	}
}

int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int actual_link_speed;

	actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
	if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
	    (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
	    ((tx_rate != 0) && (tx_rate <= 10)))
		/* rate limit cannot be set to 10Mb or less in 10Gb adapters */
		return -EINVAL;

	adapter->vf_rate_link_speed = actual_link_speed;
	adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
	ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

	return 0;
}

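/*
 * PFVFSPOOF packs eight VFs per register: the low byte holds the MAC
 * anti-spoof enables and the byte at IXGBE_SPOOF_VLANAS_SHIFT holds the
 * VLAN anti-spoof enables, hence the vf >> 3 and vf % 8 indexing below.
 */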
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int vf_target_reg = vf >> 3;
	int vf_target_shift = vf % 8;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 regval;

	adapter->vfinfo[vf].spoofchk_enabled = setting;

	regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
	regval &= ~(1 << vf_target_shift);
	regval |= (setting << vf_target_shift);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);

	if (adapter->vfinfo[vf].vlan_count) {
		vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT;
		regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
		regval &= ~(1 << vf_target_shift);
		regval |= (setting << vf_target_shift);
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
	}

	return 0;
}

int ixgbe_ndo_get_vf_config(struct net_device *netdev,
			    int vf, struct ifla_vf_info *ivi)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	if (vf >= adapter->num_vfs)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
	ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
	ivi->vlan = adapter->vfinfo[vf].pf_vlan;
	ivi->qos = adapter->vfinfo[vf].pf_qos;
	ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
	return 0;
}