/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
#ifdef NETIF_F_HW_VLAN_CTAG_TX
#include <linux/if_vlan.h>
#endif

#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_PCI_IOV
static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct vf_macvlans *mv_list;
	int num_vf_macvlans, i;

	num_vf_macvlans = hw->mac.num_rar_entries -
			  (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
	if (!num_vf_macvlans)
		return;

	mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
			  GFP_KERNEL);
	if (mv_list) {
		/* Initialize list of VF macvlans */
		INIT_LIST_HEAD(&adapter->vf_mvs.l);
		for (i = 0; i < num_vf_macvlans; i++) {
			mv_list[i].vf = -1;
			mv_list[i].free = true;
			list_add(&mv_list[i].l, &adapter->vf_mvs.l);
		}
		adapter->mv_list = mv_list;
	}
}

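/* Core SR-IOV bring-up shared by the module-parameter and sriov_configure
 * paths: flags SR-IOV/VMDq as enabled, puts the internal switch into VEB
 * bridge mode, and allocates per-VF bookkeeping with conservative defaults
 * (spoof check on, RSS query off, untrusted, xcast mode NONE).
 */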
static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
	e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);

	/* Enable VMDq flag so device will be set in VM mode */
	adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
	if (!adapter->ring_feature[RING_F_VMDQ].limit)
		adapter->ring_feature[RING_F_VMDQ].limit = 1;
	adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs;

	/* Initialize default switching mode VEB */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
	adapter->bridge_mode = BRIDGE_MODE_VEB;

	ixgbe_alloc_vf_macvlans(adapter);

	/* If call to enable VFs succeeded then allocate memory
	 * for per VF control structures.
	 */
	adapter->vfinfo =
		kcalloc(adapter->num_vfs,
			sizeof(struct vf_data_storage), GFP_KERNEL);
	if (adapter->vfinfo) {
		int i;

		/* limit traffic classes based on VFs enabled */
		if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
		    (adapter->num_vfs < 16)) {
			adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
			adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
		} else if (adapter->num_vfs < 32) {
			adapter->dcb_cfg.num_tcs.pg_tcs = 4;
			adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
		} else {
			adapter->dcb_cfg.num_tcs.pg_tcs = 1;
			adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
		}

		/* Disable RSC when in SR-IOV mode */
		adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
				     IXGBE_FLAG2_RSC_ENABLED);

		for (i = 0; i < adapter->num_vfs; i++) {
			/* enable spoof checking for all VFs */
			adapter->vfinfo[i].spoofchk_enabled = true;

			/* We support VF RSS querying only for 82599 and x540
			 * devices at the moment. These devices share RSS
			 * indirection table and RSS hash key with PF therefore
			 * we want to disable the querying by default.
			 */
			adapter->vfinfo[i].rss_query_enabled = 0;

			/* Untrust all VFs */
			adapter->vfinfo[i].trusted = false;

			/* set the default xcast mode */
			adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
		}

		return 0;
	}

	return -ENOMEM;
}

/**
 * ixgbe_get_vfs - Find and take references to all vf devices
 * @adapter: Pointer to adapter struct
 */
static void ixgbe_get_vfs(struct ixgbe_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 vendor = pdev->vendor;
	struct pci_dev *vfdev;
	int vf = 0;
	u16 vf_id;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);

	vfdev = pci_get_device(vendor, vf_id, NULL);
	for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) {
		if (!vfdev->is_virtfn)
			continue;
		if (vfdev->physfn != pdev)
			continue;
		if (vf >= adapter->num_vfs)
			continue;
		pci_dev_get(vfdev);
		adapter->vfinfo[vf].vfdev = vfdev;
		++vf;
	}
}

/* Note this function is called when the user wants to enable SR-IOV
 * VFs using the now deprecated module parameter
 */
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
{
	int pre_existing_vfs = 0;

	pre_existing_vfs = pci_num_vf(adapter->pdev);
	if (!pre_existing_vfs && !adapter->num_vfs)
		return;

	/* If there are pre-existing VFs then we have to force
	 * use of that many - override any module parameter value.
	 * This may result from the user unloading the PF driver
	 * while VFs were assigned to guest VMs or because the VFs
	 * have been created via the new PCI SR-IOV sysfs interface.
	 */
	if (pre_existing_vfs) {
		adapter->num_vfs = pre_existing_vfs;
		dev_warn(&adapter->pdev->dev,
			 "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
	} else {
		int err;
		/*
		 * The 82599 supports up to 64 VFs per physical function
		 * but this implementation limits allocation to 63 so that
		 * basic networking resources are still available to the
		 * physical function.  If the user requests greater than
		 * 63 VFs then it is an error - reset to default of zero.
		 */
		adapter->num_vfs = min_t(unsigned int, adapter->num_vfs,
					 IXGBE_MAX_VFS_DRV_LIMIT);

		err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (err) {
			e_err(probe, "Failed to enable PCI sriov: %d\n", err);
			adapter->num_vfs = 0;
			return;
		}
	}

	if (!__ixgbe_enable_sriov(adapter)) {
		ixgbe_get_vfs(adapter);
		return;
	}

	/* If we have gotten to this point then there is no memory available
	 * to manage the VF devices - print message and bail.
	 */
	e_err(probe, "Unable to allocate memory for VF Data Storage - SRIOV disabled\n");
	ixgbe_disable_sriov(adapter);
}

#endif /* #ifdef CONFIG_PCI_IOV */
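/* Reverse of the enable paths: drop the PCI device references taken in
 * ixgbe_get_vfs(), free per-VF state, and return the hardware to non-VM
 * mode. If VFs are still assigned to guests, the hardware is left enabled
 * and -EPERM is returned rather than yanking devices out of running VMs.
 */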
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
	unsigned int num_vfs = adapter->num_vfs, vf;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gpie;
	u32 vmdctl;
	int rss;

	/* set num VFs to 0 to prevent access to vfinfo */
	adapter->num_vfs = 0;

	/* put the reference to all of the vf devices */
	for (vf = 0; vf < num_vfs; ++vf) {
		struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;

		if (!vfdev)
			continue;
		adapter->vfinfo[vf].vfdev = NULL;
		pci_dev_put(vfdev);
	}

	/* free VF control structures */
	kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;

	/* free macvlan list */
	kfree(adapter->mv_list);
	adapter->mv_list = NULL;

	/* if SR-IOV is already disabled then there is nothing to do */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return 0;

#ifdef CONFIG_PCI_IOV
	/*
	 * If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(adapter->pdev)) {
		e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
		return -EPERM;
	}
	/* disable iov and allow time for transactions to clear */
	pci_disable_sriov(adapter->pdev);
#endif

	/* turn off device IOV mode */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* set default pool back to 0 */
	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
	IXGBE_WRITE_FLUSH(hw);

	/* Disable VMDq flag so device will be set in VM mode */
	if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
		adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
		rss = min_t(int, ixgbe_max_rss_indices(adapter),
			    num_online_cpus());
	} else {
		rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
	}

	adapter->ring_feature[RING_F_VMDQ].offset = 0;
	adapter->ring_feature[RING_F_RSS].limit = rss;

	/* take a breather then clean up driver data */
	msleep(100);
	return 0;
}

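/* Enable path for SR-IOV configuration requests: software state is
 * allocated first (__ixgbe_enable_sriov), each VF slot is initialized via
 * ixgbe_vf_configuration(), the PF is reinitialized to make room for the
 * VF pools, and only then does pci_enable_sriov() create the VFs.
 */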
static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
	int err = 0;
	u8 num_tc;
	int i;
	int pre_existing_vfs = pci_num_vf(dev);

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		err = ixgbe_disable_sriov(adapter);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return num_vfs;

	if (err)
		return err;

	/* While the SR-IOV capability structure reports total VFs to be 64,
	 * we limit the actual number allocated as below based on two factors.
	 *	Num_TCs	MAX_VFs
	 *	1	63
	 *	<=4	31
	 *	>4	15
	 * First, we reserve some transmit/receive resources for the PF.
	 * Second, VMDQ also uses the same pools that SR-IOV does. We need to
	 * account for this, so that we don't accidentally allocate more VFs
	 * than we have available pools. The PCI bus driver already checks for
	 * other values out of range.
	 */
	num_tc = netdev_get_num_tc(adapter->netdev);

	if (num_tc > 4) {
		if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) {
			e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC);
			return -EPERM;
		}
	} else if ((num_tc > 1) && (num_tc <= 4)) {
		if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) {
			e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC);
			return -EPERM;
		}
	} else {
		if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) {
			e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC);
			return -EPERM;
		}
	}
	adapter->num_vfs = num_vfs;

	err = __ixgbe_enable_sriov(adapter);
	if (err)
		return err;

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_vf_configuration(dev, (i | 0x10000000));

	/* reset before enabling SRIOV to avoid mailbox issues */
	ixgbe_sriov_reinit(adapter);

	err = pci_enable_sriov(dev, num_vfs);
	if (err) {
		e_dev_warn("Failed to enable PCI sriov: %d\n", err);
		return err;
	}
	ixgbe_get_vfs(adapter);

	return num_vfs;
#else
	return 0;
#endif
}

static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
	int err;
#ifdef CONFIG_PCI_IOV
	u32 current_flags = adapter->flags;
#endif

	err = ixgbe_disable_sriov(adapter);

	/* Only reinit if no error and state changed */
#ifdef CONFIG_PCI_IOV
	if (!err && current_flags != adapter->flags)
		ixgbe_sriov_reinit(adapter);
#endif

	return err;
}

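/* .sriov_configure entry point. Assuming the standard PCI sysfs plumbing,
 * userspace reaches this via, e.g. (interface name is a placeholder):
 *
 *   echo 4 > /sys/class/net/eth0/device/sriov_numvfs   # create 4 VFs
 *   echo 0 > /sys/class/net/eth0/device/sriov_numvfs   # remove them
 */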
int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	if (num_vfs == 0)
		return ixgbe_pci_sriov_disable(dev);
	else
		return ixgbe_pci_sriov_enable(dev, num_vfs);
}

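/* Mailbox handler for IXGBE_VF_SET_MULTICAST. The VF packs an entry count
 * into the IXGBE_VT_MSGINFO field of msgbuf[0] and 16-bit multicast hash
 * values from msgbuf[1] onward. Each hash addresses one bit of the MTA
 * hash table: bits 11:5 select one of 128 32-bit registers and bits 4:0
 * select the bit within it.
 */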
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
				   u32 *msgbuf, u32 vf)
{
	int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
		      >> IXGBE_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
	struct ixgbe_hw *hw = &adapter->hw;
	int i;
	u32 vector_bit;
	u32 vector_reg;
	u32 mta_reg;
	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	/* only so many hash values supported */
	entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);

	/*
	 * salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vfinfo->num_vf_mc_hashes = entries;

	/*
	 * VFs are limited to using the MTA hash table for their multicast
	 * addresses
	 */
	for (i = 0; i < entries; i++) {
		vfinfo->vf_mc_hashes[i] = hash_list[i];
	}

	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
		vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
		vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
		mta_reg |= BIT(vector_bit);
		IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
	}
	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}

#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct vf_data_storage *vfinfo;
	int i, j;
	u32 vector_bit;
	u32 vector_reg;
	u32 mta_reg;

	for (i = 0; i < adapter->num_vfs; i++) {
		u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
		vfinfo = &adapter->vfinfo[i];
		for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
			hw->addr_ctrl.mta_in_use++;
			vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
			vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
			mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
			mta_reg |= BIT(vector_bit);
			IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
		}

		if (vfinfo->num_vf_mc_hashes)
			vmolr |= IXGBE_VMOLR_ROMPE;
		else
			vmolr &= ~IXGBE_VMOLR_ROMPE;
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
	}

	/* Restore any VF macvlans */
	ixgbe_full_sync_mac_table(adapter);
}
#endif

static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
			     u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* If VLAN overlaps with one the PF is currently monitoring make
	 * sure that we are able to allocate a VLVF entry.  This may be
	 * redundant but it guarantees PF will maintain visibility to
	 * the VLAN.
	 */
	if (add && test_bit(vid, adapter->active_vlans)) {
		err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false);
		if (err)
			return err;
	}

	err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false);

	if (add && !err)
		return err;

	/* If we failed to add the VF VLAN or we are removing the VF VLAN
	 * we may need to drop the PF pool bit in order to allow us to free
	 * up the VLVF resources.
	 */
	if (test_bit(vid, adapter->active_vlans) ||
	    (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
		ixgbe_update_pf_promisc_vlvf(adapter, vid);

	return err;
}

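/* Mailbox handler for IXGBE_VF_SET_LPE: msgbuf[1] is the VF's requested
 * maximum frame size in bytes. For reference, ETH_FRAME_LEN is 1514
 * (1500-byte MTU plus 14-byte Ethernet header, FCS not included) and
 * ETH_FCS_LEN is 4, so anything larger than 1518 on the wire counts as a
 * jumbo frame in the checks below.
 */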
static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = msgbuf[1];
	u32 max_frs;

	/*
	 * For 82599EB we have to keep all PFs and VFs operating with
	 * the same max_frame value in order to avoid sending an oversize
	 * frame to a VF.  In order to guarantee this is handled correctly
	 * for all cases we have several special exceptions to take into
	 * account before we can enable the VF for receive
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		struct net_device *dev = adapter->netdev;
		int pf_max_frame = dev->mtu + ETH_HLEN;
		u32 reg_offset, vf_shift, vfre;
		s32 err = 0;

#ifdef CONFIG_FCOE
		if (dev->features & NETIF_F_FCOE_MTU)
			pf_max_frame = max_t(int, pf_max_frame,
					     IXGBE_FCOE_JUMBO_FRAME_SIZE);

#endif /* CONFIG_FCOE */
		switch (adapter->vfinfo[vf].vf_api) {
		case ixgbe_mbox_api_11:
		case ixgbe_mbox_api_12:
		case ixgbe_mbox_api_13:
			/*
			 * Version 1.1 supports jumbo frames on VFs if PF has
			 * jumbo frames enabled which means legacy VFs are
			 * disabled
			 */
			if (pf_max_frame > ETH_FRAME_LEN)
				break;
		default:
			/*
			 * If the PF or VF are running w/ jumbo frames enabled
			 * we need to shut down the VF Rx path as we cannot
			 * support jumbo frames on legacy VFs
			 */
			if ((pf_max_frame > ETH_FRAME_LEN) ||
			    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
				err = -EINVAL;
			break;
		}

		/* determine VF receive enable location */
		vf_shift = vf % 32;
		reg_offset = vf / 32;

		/* enable or disable receive depending on error */
		vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
		if (err)
			vfre &= ~BIT(vf_shift);
		else
			vfre |= BIT(vf_shift);
		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);

		if (err) {
			e_err(drv, "VF max_frame %d out of range\n", max_frame);
			return err;
		}
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
		e_err(drv, "VF max_frame %d out of range\n", max_frame);
		return -EINVAL;
	}

	/* pull current max frame size from hardware */
	max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
	max_frs &= IXGBE_MHADD_MFS_MASK;
	max_frs >>= IXGBE_MHADD_MFS_SHIFT;

	if (max_frs < max_frame) {
		max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
	}

	e_info(hw, "VF requests change max MTU to %d\n", max_frame);

	return 0;
}

static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
	vmolr |= IXGBE_VMOLR_BAM;
	if (aupe)
		vmolr |= IXGBE_VMOLR_AUPE;
	else
		vmolr &= ~IXGBE_VMOLR_AUPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}

static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
}

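/* Walk all IXGBE_VLVF_ENTRIES VLAN pool filters and drop this VF from each.
 * Pool membership of entry i is a 64-bit bitmap split across two 32-bit
 * VLVFB registers, hence word = i * 2 + vf / 32 below. The VFTA bit and
 * the VLVF entry itself are only released once no other pool - including
 * the PF - still references the VLAN.
 */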
static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlvfb_mask, pool_mask, i;

	/* create mask for VF and other pools */
	pool_mask = ~BIT(VMDQ_P(0) % 32);
	vlvfb_mask = BIT(vf % 32);

	/* post increment loop, covers VLVF_ENTRIES - 1 to 0 */
	for (i = IXGBE_VLVF_ENTRIES; i--;) {
		u32 bits[2], vlvfb, vid, vfta, vlvf;
		u32 word = i * 2 + vf / 32;
		u32 mask;

		vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));

		/* if our bit isn't set we can skip it */
		if (!(vlvfb & vlvfb_mask))
			continue;

		/* clear our bit from vlvfb */
		vlvfb ^= vlvfb_mask;

		/* create 64b mask to check to see if we should clear VLVF */
		bits[word % 2] = vlvfb;
		bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));

		/* if other pools are present, just remove ourselves */
		if (bits[(VMDQ_P(0) / 32) ^ 1] ||
		    (bits[VMDQ_P(0) / 32] & pool_mask))
			goto update_vlvfb;

		/* if PF is present, leave VFTA */
		if (bits[0] || bits[1])
			goto update_vlvf;

		/* if we cannot determine VLAN just remove ourselves */
		vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
		if (!vlvf)
			goto update_vlvfb;

		vid = vlvf & VLAN_VID_MASK;
		mask = BIT(vid % 32);

		/* clear bit from VFTA */
		vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
		if (vfta & mask)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask);
update_vlvf:
		/* clear POOL selection enable */
		IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0);

		if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
			vlvfb = 0;
update_vlvfb:
		/* clear pool bits */
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb);
	}
}

static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
	u8 num_tcs = netdev_get_num_tc(adapter->netdev);

	/* remove VLAN filters belonging to this VF */
	ixgbe_clear_vf_vlans(adapter, vf);

	/* add back PF assigned VLAN or VLAN 0 */
	ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);

	/* reset offloads to defaults */
	ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);

	/* set outgoing tags for VFs */
	if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
		ixgbe_clear_vmvir(adapter, vf);
	} else {
		if (vfinfo->pf_qos || !num_tcs)
			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
					vfinfo->pf_qos, vf);
		else
			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
					adapter->default_up, vf);

		if (vfinfo->spoofchk_enabled)
			hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
	}

	/* reset multicast table array for vf */
	adapter->vfinfo[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	ixgbe_set_rx_mode(adapter->netdev);

	ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);

	/* reset VF api back to unknown */
	adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
}

static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
			    int vf, unsigned char *mac_addr)
{
	ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
	memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
	ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);

	return 0;
}

static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
				int vf, int index, unsigned char *mac_addr)
{
	struct list_head *pos;
	struct vf_macvlans *entry;

	if (index <= 1) {
		list_for_each(pos, &adapter->vf_mvs.l) {
			entry = list_entry(pos, struct vf_macvlans, l);
			if (entry->vf == vf) {
				entry->vf = -1;
				entry->free = true;
				entry->is_macvlan = false;
				ixgbe_del_mac_filter(adapter,
						     entry->vf_macvlan, vf);
			}
		}
	}

	/*
	 * If index was zero then we were asked to clear the uc list
	 * for the VF.  We're done.
	 */
	if (!index)
		return 0;

	entry = NULL;

	list_for_each(pos, &adapter->vf_mvs.l) {
		entry = list_entry(pos, struct vf_macvlans, l);
		if (entry->free)
			break;
	}

	/*
	 * If we traversed the entire list and didn't find a free entry
	 * then we're out of space on the RAR table.  Also entry may
	 * be NULL because the original memory allocation for the list
	 * failed, which is not fatal but does mean we can't support
	 * VF requests for MACVLAN because we couldn't allocate
	 * memory for the list management required.
	 */
	if (!entry || !entry->free)
		return -ENOSPC;

	entry->free = false;
	entry->is_macvlan = true;
	entry->vf = vf;
	memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);

	ixgbe_add_mac_filter(adapter, mac_addr, vf);

	return 0;
}

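/* Per-VF callback used while enabling SR-IOV. The event_mask encoding
 * (see the caller in ixgbe_pci_sriov_enable()) carries the VF number in
 * the low 6 bits and an enable flag at bit 28 (0x10000000); on enable the
 * VF MAC is zeroed so a fresh address can be assigned later.
 */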
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	unsigned int vfn = (event_mask & 0x3f);
	bool enable = ((event_mask & 0x10000000U) != 0);

	if (enable)
		eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);

	return 0;
}

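/* Apply the given QDE (queue drop enable) bits to every queue in this VF's
 * pool. QDE is an indirectly addressed register: each write sets
 * IXGBE_QDE_WRITE plus the target queue index in the IDX field along with
 * the drop bits to program.
 */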
static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
				   u32 qde)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	int i;

	for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
		u32 reg;

		/* flush previous write */
		IXGBE_WRITE_FLUSH(hw);

		/* indicate to hardware that we want to set drop enable */
		reg = IXGBE_QDE_WRITE | qde;
		reg |= i << IXGBE_QDE_IDX_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
	}
}

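/* Handle an IXGBE_VF_RESET mailbox message. The VF transmit/receive
 * enables (VFTE/VFRE) are bitmaps of one bit per VF spread across 32-bit
 * registers, hence reg_offset = vf / 32 and vf_shift = vf % 32 below (VF
 * 40, for example, is bit 8 of register 1). The reply carries the VF MAC
 * address and the PF's multicast filter type.
 */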
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
	u32 reg, reg_offset, vf_shift;
	u32 msgbuf[4] = {0, 0, 0, 0};
	u8 *addr = (u8 *)(&msgbuf[1]);
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	int i;

	e_info(probe, "VF Reset msg received from vf %d\n", vf);

	/* reset the filters for the device */
	ixgbe_vf_reset_event(adapter, vf);

	/* set vf mac address */
	if (!is_zero_ether_addr(vf_mac))
		ixgbe_set_vf_mac(adapter, vf, vf_mac);

	vf_shift = vf % 32;
	reg_offset = vf / 32;

	/* enable transmit for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
	reg |= BIT(vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);

	/* force drop enable for all VF Rx queues */
	ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);

	/* enable receive for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
	reg |= BIT(vf_shift);
	/*
	 * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
	 * For more info take a look at ixgbe_set_vf_lpe
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		struct net_device *dev = adapter->netdev;
		int pf_max_frame = dev->mtu + ETH_HLEN;

#ifdef CONFIG_FCOE
		if (dev->features & NETIF_F_FCOE_MTU)
			pf_max_frame = max_t(int, pf_max_frame,
					     IXGBE_FCOE_JUMBO_FRAME_SIZE);

#endif /* CONFIG_FCOE */
		if (pf_max_frame > ETH_FRAME_LEN)
			reg &= ~BIT(vf_shift);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);

	/* enable VF mailbox for further messages */
	adapter->vfinfo[vf].clear_to_send = true;

	/* Enable counting of spoofed packets in the SSVPC register */
	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
	reg |= BIT(vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

	/*
	 * Reset the VFs TDWBAL and TDWBAH registers
	 * which are not cleared by an FLR
	 */
	for (i = 0; i < q_per_pool; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
	}

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = IXGBE_VF_RESET;
	if (!is_zero_ether_addr(vf_mac)) {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
		memcpy(addr, vf_mac, ETH_ALEN);
	} else {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
		dev_warn(&adapter->pdev->dev,
			 "VF %d has no MAC address assigned, you may have to assign one manually\n",
			 vf);
	}

	/*
	 * Piggyback the multicast filter type so VF can compute the
	 * correct vectors
	 */
	msgbuf[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

	return 0;
}

static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	u8 *new_mac = ((u8 *)(&msgbuf[1]));

	if (!is_valid_ether_addr(new_mac)) {
		e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
		return -1;
	}

	if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
	    !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
		e_warn(drv,
		       "VF %d attempted to override administratively set MAC address\n"
		       "Reload the VF driver to resume operations\n",
		       vf);
		return -1;
	}

	return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
}

static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	if (adapter->vfinfo[vf].pf_vlan || tcs) {
		e_warn(drv,
		       "VF %d attempted to override administratively set VLAN configuration\n"
		       "Reload the VF driver to resume operations\n",
		       vf);
		return -1;
	}

	/* VLAN 0 is a special case, don't allow it to be removed */
	if (!vid && !add)
		return 0;

	return ixgbe_set_vf_vlan(adapter, add, vid, vf);
}

static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
				    u32 *msgbuf, u32 vf)
{
	u8 *new_mac = ((u8 *)(&msgbuf[1]));
	int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
		    IXGBE_VT_MSGINFO_SHIFT;
	int err;

	if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
	    index > 0) {
		e_warn(drv,
		       "VF %d requested MACVLAN filter but is administratively denied\n",
		       vf);
		return -1;
	}

	/* A non-zero index indicates the VF is setting a filter */
	if (index) {
		if (!is_valid_ether_addr(new_mac)) {
			e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
			return -1;
		}

		/*
		 * If the VF is allowed to set MAC filters then turn off
		 * anti-spoofing to avoid false positives.
		 */
		if (adapter->vfinfo[vf].spoofchk_enabled) {
			struct ixgbe_hw *hw = &adapter->hw;

			hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
			hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
		}
	}

	err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
	if (err == -ENOSPC)
		e_warn(drv,
		       "VF %d has requested a MACVLAN filter but there is no space for it\n",
		       vf);

	return err < 0;
}

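/* Mailbox API negotiation. As used in this file: api_10 is the legacy
 * baseline, api_11 adds queue and jumbo-frame awareness, api_12 adds the
 * RSS RETA/key queries, and api_13 adds the promiscuous xcast mode.
 */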
static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
				  u32 *msgbuf, u32 vf)
{
	int api = msgbuf[1];

	switch (api) {
	case ixgbe_mbox_api_10:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		adapter->vfinfo[vf].vf_api = api;
		return 0;
	default:
		break;
	}

	e_info(drv, "VF %d requested invalid api version %u\n", vf, api);

	return -1;
}

static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
			       u32 *msgbuf, u32 vf)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	unsigned int default_tc = 0;
	u8 num_tcs = netdev_get_num_tc(dev);

	/* verify the PF is supporting the correct APIs */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_20:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		break;
	default:
		return -1;
	}

	/* only allow 1 Tx queue for bandwidth limiting */
	msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
	msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);

	/* if TCs > 1 determine which TC belongs to default user priority */
	if (num_tcs > 1)
		default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);

	/* notify VF of need for VLAN tag stripping, and correct queue */
	if (num_tcs)
		msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
	else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
		msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
	else
		msgbuf[IXGBE_VF_TRANS_VLAN] = 0;

	/* notify VF of default queue */
	msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;

	return 0;
}

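/* IXGBE_VF_GET_RETA handler. A worked example of the packing below: with
 * up to 4 RSS queues each RETA entry is 0-3 and fits in 2 bits, so sixteen
 * entries pack into one u32 - entry (16 * i + j) lands in bits [2j+1:2j]
 * of out_buf[i] - and a 128-entry table travels in just 8 words.
 */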
static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
	u32 i, j;
	u32 *out_buf = &msgbuf[1];
	const u8 *reta = adapter->rss_indir_tbl;
	u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter);

	/* Check if operation is permitted */
	if (!adapter->vfinfo[vf].rss_query_enabled)
		return -EPERM;

	/* verify the PF is supporting the correct API */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* This mailbox command is supported (required) only for 82599 and x540
	 * VFs which support up to 4 RSS queues. Therefore we will compress the
	 * RETA by saving only 2 bits from each entry. This way we will be able
	 * to transfer the whole RETA in a single mailbox operation.
	 */
	for (i = 0; i < reta_size / 16; i++) {
		out_buf[i] = 0;
		for (j = 0; j < 16; j++)
			out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j);
	}

	return 0;
}

static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
				u32 *msgbuf, u32 vf)
{
	u32 *rss_key = &msgbuf[1];

	/* Check if the operation is permitted */
	if (!adapter->vfinfo[vf].rss_query_enabled)
		return -EPERM;

	/* verify the PF is supporting the correct API */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		break;
	default:
		return -EOPNOTSUPP;
	}

	memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key));

	return 0;
}

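/* IXGBE_VF_UPDATE_XCAST_MODE handler: steps the VF's VMOLR receive filters
 * between none/multi/allmulti/promisc. Bit meanings as commonly documented
 * for these MACs (stated as an aid, not re-verified per device): BAM
 * accepts broadcast, ROMPE accepts multicast matching the MTA, MPE is
 * multicast promiscuous, UPE unicast promiscuous, VPE VLAN promiscuous.
 */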
static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
				      u32 *msgbuf, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int xcast_mode = msgbuf[1];
	u32 vmolr, fctrl, disable, enable;

	/* verify the PF is supporting the correct APIs */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		/* fall through */
	case ixgbe_mbox_api_13:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI &&
	    !adapter->vfinfo[vf].trusted) {
		xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
	}

	if (adapter->vfinfo[vf].xcast_mode == xcast_mode)
		goto out;

	switch (xcast_mode) {
	case IXGBEVF_XCAST_MODE_NONE:
		disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
			  IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = 0;
		break;
	case IXGBEVF_XCAST_MODE_MULTI:
		disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
		break;
	case IXGBEVF_XCAST_MODE_ALLMULTI:
		disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
		break;
	case IXGBEVF_XCAST_MODE_PROMISC:
		if (hw->mac.type <= ixgbe_mac_82599EB)
			return -EOPNOTSUPP;

		fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
		if (!(fctrl & IXGBE_FCTRL_UPE)) {
			/* VF promisc requires PF in promisc */
			e_warn(drv,
			       "Enabling VF promisc requires PF in promisc\n");
			return -EPERM;
		}

		disable = 0;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
			 IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		break;
	default:
		return -EOPNOTSUPP;
	}

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
	vmolr &= ~disable;
	vmolr |= enable;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	adapter->vfinfo[vf].xcast_mode = xcast_mode;

out:
	msgbuf[1] = xcast_mode;

	return 0;
}

static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
	u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	struct ixgbe_hw *hw = &adapter->hw;
	s32 retval;

	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);

	if (retval) {
		pr_err("Error receiving message from VF\n");
		return retval;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
		return 0;

	/* flush the ack before we write any messages back */
	IXGBE_WRITE_FLUSH(hw);

	if (msgbuf[0] == IXGBE_VF_RESET)
		return ixgbe_vf_reset_msg(adapter, vf);

	/*
	 * until the vf completes a virtual function reset it should not be
	 * allowed to start any configuration.
	 */
	if (!adapter->vfinfo[vf].clear_to_send) {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
		ixgbe_write_mbx(hw, msgbuf, 1, vf);
		return 0;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case IXGBE_VF_SET_MAC_ADDR:
		retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_MULTICAST:
		retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_VLAN:
		retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_LPE:
		retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_MACVLAN:
		retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_GET_QUEUES:
		retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_GET_RETA:
		retval = ixgbe_get_vf_reta(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_GET_RSS_KEY:
		retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_UPDATE_XCAST_MODE:
		retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
		break;
	default:
		e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
		retval = IXGBE_ERR_MBX;
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;

	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);

	return retval;
}

static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 msg = IXGBE_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!adapter->vfinfo[vf].clear_to_send)
		ixgbe_write_mbx(hw, &msg, 1, vf);
}

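/* Poll the mailbox state for every VF: handle reset requests first, then
 * pending messages, then acks. The ixgbe_check_for_* helpers return 0 when
 * the corresponding event is pending.
 */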
void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->num_vfs; vf++) {
		/* process any reset requests */
		if (!ixgbe_check_for_rst(hw, vf))
			ixgbe_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!ixgbe_check_for_msg(hw, vf))
			ixgbe_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!ixgbe_check_for_ack(hw, vf))
			ixgbe_rcv_ack_from_vf(adapter, vf);
	}
}

void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* disable transmit and receive for all vfs */
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
}

static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ping;

	ping = IXGBE_PF_CONTROL_MSG;
	if (adapter->vfinfo[vf].clear_to_send)
		ping |= IXGBE_VT_MSGTYPE_CTS;
	ixgbe_write_mbx(hw, &ping, 1, vf);
}

void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0 ; i < adapter->num_vfs; i++) {
		ping = IXGBE_PF_CONTROL_MSG;
		if (adapter->vfinfo[i].clear_to_send)
			ping |= IXGBE_VT_MSGTYPE_CTS;
		ixgbe_write_mbx(hw, &ping, 1, i);
	}
}

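/* The ixgbe_ndo_* handlers below back the ndo_set_vf_* netdev ops.
 * Assuming the usual iproute2 plumbing, an administrator reaches them with
 * commands along the lines of (interface and addresses are placeholders):
 *
 *   ip link set eth0 vf 0 mac 02:00:00:00:00:01
 *   ip link set eth0 vf 0 vlan 100 qos 3
 */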
Greg Rose7f016482010-05-04 22:12:06 +00001349int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1350{
1351 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1352 if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
1353 return -EINVAL;
1354 adapter->vfinfo[vf].pf_set_mac = true;
1355 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
1356	dev_info(&adapter->pdev->dev,
1357		 "Reload the VF driver to make this change effective.\n");
1358 if (test_bit(__IXGBE_DOWN, &adapter->state)) {
1359		dev_warn(&adapter->pdev->dev,
1360			 "The VF MAC address has been set, but the PF device is not up.\n");
1361		dev_warn(&adapter->pdev->dev,
1362			 "Bring the PF device up before attempting to use the VF device.\n");
1363 }
1364 return ixgbe_set_vf_mac(adapter, vf, mac);
1365}
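/* This ndo is reached over rtnetlink, e.g. from iproute2 with
 * "ip link set <pf> vf 0 mac 52:54:00:aa:bb:cc".  The pf_set_mac flag
 * recorded above is consulted when the VF later tries to change its
 * own MAC, so an administratively assigned address sticks.
 */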
1366
Don Skidmore2b509c02014-11-01 01:06:57 +00001367static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
1368 u16 vlan, u8 qos)
1369{
1370 struct ixgbe_hw *hw = &adapter->hw;
Emil Tantilov42ce2c82014-12-10 05:28:51 +00001371 int err;
Don Skidmore2b509c02014-11-01 01:06:57 +00001372
Emil Tantilov42ce2c82014-12-10 05:28:51 +00001373 err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
Don Skidmore2b509c02014-11-01 01:06:57 +00001374 if (err)
1375 goto out;
Emil Tantilov42ce2c82014-12-10 05:28:51 +00001376
Alexander Duyck4c7f35f2015-11-02 17:10:32 -08001377 /* Revoke tagless access via VLAN 0 */
1378 ixgbe_set_vf_vlan(adapter, false, 0, vf);
1379
Don Skidmore2b509c02014-11-01 01:06:57 +00001380 ixgbe_set_vmvir(adapter, vlan, qos, vf);
1381 ixgbe_set_vmolr(hw, vf, false);
Don Skidmore9a75a1a2014-11-07 03:53:35 +00001382
1383 /* enable hide vlan on X550 */
1384 if (hw->mac.type >= ixgbe_mac_X550)
1385 ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE |
1386 IXGBE_QDE_HIDE_VLAN);
1387
Don Skidmore2b509c02014-11-01 01:06:57 +00001388 adapter->vfinfo[vf].pf_vlan = vlan;
1389 adapter->vfinfo[vf].pf_qos = qos;
1390 dev_info(&adapter->pdev->dev,
1391 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
1392 if (test_bit(__IXGBE_DOWN, &adapter->state)) {
1393 dev_warn(&adapter->pdev->dev,
1394 "The VF VLAN has been set, but the PF device is not up.\n");
1395 dev_warn(&adapter->pdev->dev,
1396 "Bring the PF device up before attempting to use the VF device.\n");
1397 }
1398
1399out:
1400 return err;
1401}
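/* Roughly: IXGBE_QDE_ENABLE turns on per-queue drop so a stalled VF
 * cannot cause head-of-line blocking, and IXGBE_QDE_HIDE_VLAN makes
 * X550-class hardware strip the port VLAN tag before frames reach the
 * VF, keeping the administratively imposed VLAN transparent to it.
 */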
1402
1403static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
1404{
1405 struct ixgbe_hw *hw = &adapter->hw;
1406 int err;
1407
1408 err = ixgbe_set_vf_vlan(adapter, false,
1409 adapter->vfinfo[vf].pf_vlan, vf);
Alexander Duyck4c7f35f2015-11-02 17:10:32 -08001410 /* Restore tagless access via VLAN 0 */
1411 ixgbe_set_vf_vlan(adapter, true, 0, vf);
Don Skidmore2b509c02014-11-01 01:06:57 +00001412 ixgbe_clear_vmvir(adapter, vf);
1413 ixgbe_set_vmolr(hw, vf, true);
Emil Tantilov42ce2c82014-12-10 05:28:51 +00001414
1415 /* disable hide VLAN on X550 */
1416 if (hw->mac.type >= ixgbe_mac_X550)
1417 ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);
1418
Don Skidmore2b509c02014-11-01 01:06:57 +00001419 adapter->vfinfo[vf].pf_vlan = 0;
1420 adapter->vfinfo[vf].pf_qos = 0;
1421
1422 return err;
1423}
1424
Moshe Shemesh79aab092016-09-22 12:11:15 +03001425int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1426 u8 qos, __be16 vlan_proto)
Greg Rose7f016482010-05-04 22:12:06 +00001427{
1428 int err = 0;
1429 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1430
1431 if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
1432 return -EINVAL;
Moshe Shemesh79aab092016-09-22 12:11:15 +03001433 if (vlan_proto != htons(ETH_P_8021Q))
1434 return -EPROTONOSUPPORT;
Greg Rose7f016482010-05-04 22:12:06 +00001435 if (vlan || qos) {
Don Skidmore2b509c02014-11-01 01:06:57 +00001436		/* If a port VLAN is already set, we must
1437		 * delete the old one before we can set the
1438		 * new one.  The usage model previously
1439		 * assumed the user would delete the old port
1440		 * VLAN before setting a new one, but this is
1441		 * not necessarily the case.
1442		 */
Greg Rose026ac672013-04-17 20:41:35 +00001443 if (adapter->vfinfo[vf].pf_vlan)
Don Skidmore2b509c02014-11-01 01:06:57 +00001444 err = ixgbe_disable_port_vlan(adapter, vf);
Greg Rose026ac672013-04-17 20:41:35 +00001445 if (err)
1446 goto out;
Don Skidmore2b509c02014-11-01 01:06:57 +00001447 err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos);
Greg Rose7f016482010-05-04 22:12:06 +00001448 } else {
Don Skidmore2b509c02014-11-01 01:06:57 +00001449 err = ixgbe_disable_port_vlan(adapter, vf);
Jacob Kellere7cf7452014-04-09 06:03:10 +00001450 }
Don Skidmore2b509c02014-11-01 01:06:57 +00001451
Greg Rose7f016482010-05-04 22:12:06 +00001452out:
Jacob Kellere7cf7452014-04-09 06:03:10 +00001453 return err;
Greg Rose7f016482010-05-04 22:12:06 +00001454}
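/* From the admin side, e.g.:
 *
 *	ip link set <pf> vf 0 vlan 100 qos 3	# impose a port VLAN
 *	ip link set <pf> vf 0 vlan 0		# remove it again
 *
 * A vlan/qos pair of 0/0 takes the disable path above, which also
 * restores tagless access via VLAN 0.
 */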
1455
Rostislav Pehlivanovc04f90e2016-01-27 18:33:30 +00001456int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
Lior Levyff4ab202011-03-11 02:03:07 +00001457{
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001458 switch (adapter->link_speed) {
Lior Levyff4ab202011-03-11 02:03:07 +00001459 case IXGBE_LINK_SPEED_100_FULL:
1460 return 100;
1461 case IXGBE_LINK_SPEED_1GB_FULL:
1462 return 1000;
1463 case IXGBE_LINK_SPEED_10GB_FULL:
1464 return 10000;
1465 default:
1466 return 0;
1467 }
1468}
1469
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001470static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
Lior Levyff4ab202011-03-11 02:03:07 +00001471{
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001472 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
1473 struct ixgbe_hw *hw = &adapter->hw;
1474 u32 bcnrc_val = 0;
1475 u16 queue, queues_per_pool;
1476 u16 tx_rate = adapter->vfinfo[vf].tx_rate;
Lior Levyff4ab202011-03-11 02:03:07 +00001477
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001478 if (tx_rate) {
1479 /* start with base link speed value */
1480 bcnrc_val = adapter->vf_rate_link_speed;
1481
Lior Levyff4ab202011-03-11 02:03:07 +00001482 /* Calculate the rate factor values to set */
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001483 bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
1484 bcnrc_val /= tx_rate;
Lior Levyff4ab202011-03-11 02:03:07 +00001485
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001486 /* clear everything but the rate factor */
1487 bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
1488 IXGBE_RTTBCNRC_RF_DEC_MASK;
1489
1490 /* enable the rate scheduler */
1491 bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
Lior Levyff4ab202011-03-11 02:03:07 +00001492 }
1493
Lior Levy7555e832011-06-25 00:09:08 -07001494 /*
1495 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
1496 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
1497 * and 0x004 otherwise.
1498 */
1499 switch (hw->mac.type) {
1500 case ixgbe_mac_82599EB:
1501 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
1502 break;
1503 case ixgbe_mac_X540:
1504 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
1505 break;
1506 default:
1507 break;
1508 }
1509
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001510 /* determine how many queues per pool based on VMDq mask */
1511 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
1512
1513 /* write value for all Tx queues belonging to VF */
1514 for (queue = 0; queue < queues_per_pool; queue++) {
1515 unsigned int reg_idx = (vf * queues_per_pool) + queue;
1516
1517 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
1518 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
1519 }
Lior Levyff4ab202011-03-11 02:03:07 +00001520}
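/* The RTTBCNRC rate factor is the link speed divided by the cap, in
 * fixed point with IXGBE_RTTBCNRC_RF_INT_SHIFT fractional bits (14 in
 * current headers; treat that value as an assumption here).  Worked
 * example for a 10G link with a 1000 Mb/s VF cap:
 *
 *	bcnrc_val = (10000 << 14) / 1000 = 163840, i.e. 10.0 in 10.14
 *
 * so the hardware paces the VF's queues at one tenth of line rate.
 */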
1521
1522void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
1523{
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001524 int i;
Lior Levyff4ab202011-03-11 02:03:07 +00001525
1526 /* VF Tx rate limit was not set */
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001527 if (!adapter->vf_rate_link_speed)
Lior Levyff4ab202011-03-11 02:03:07 +00001528 return;
1529
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001530 if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
Lior Levyff4ab202011-03-11 02:03:07 +00001531 adapter->vf_rate_link_speed = 0;
1532 dev_info(&adapter->pdev->dev,
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001533			 "Link speed has changed; VF transmit rate is disabled\n");
Lior Levyff4ab202011-03-11 02:03:07 +00001534 }
1535
1536 for (i = 0; i < adapter->num_vfs; i++) {
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001537 if (!adapter->vf_rate_link_speed)
Lior Levyff4ab202011-03-11 02:03:07 +00001538 adapter->vfinfo[i].tx_rate = 0;
1539
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001540 ixgbe_set_vf_rate_limit(adapter, i);
Lior Levyff4ab202011-03-11 02:03:07 +00001541 }
1542}
1543
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001544int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
1545 int max_tx_rate)
Greg Rose7f016482010-05-04 22:12:06 +00001546{
Lior Levyff4ab202011-03-11 02:03:07 +00001547 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001548 int link_speed;
Lior Levyff4ab202011-03-11 02:03:07 +00001549
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001550 /* verify VF is active */
1551 if (vf >= adapter->num_vfs)
Lior Levyff4ab202011-03-11 02:03:07 +00001552 return -EINVAL;
1553
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001554 /* verify link is up */
1555 if (!adapter->link_up)
1556 return -EINVAL;
1557
1558 /* verify we are linked at 10Gbps */
1559 link_speed = ixgbe_link_mbps(adapter);
1560 if (link_speed != 10000)
1561 return -EINVAL;
1562
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001563 if (min_tx_rate)
1564 return -EINVAL;
1565
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001566	/* rate limit cannot be less than 10 Mb/s or greater than link speed */
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001567 if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001568 return -EINVAL;
1569
1570 /* store values */
1571 adapter->vf_rate_link_speed = link_speed;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001572 adapter->vfinfo[vf].tx_rate = max_tx_rate;
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001573
1574 /* update hardware configuration */
1575 ixgbe_set_vf_rate_limit(adapter, vf);
Lior Levyff4ab202011-03-11 02:03:07 +00001576
1577 return 0;
Greg Rose7f016482010-05-04 22:12:06 +00001578}
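/* Example: cap VF 0 at 500 Mb/s on a 10G link (min_tx_rate must be 0,
 * and the cap must be above 10 Mb/s and at most the link speed, per
 * the checks above):
 *
 *	ip link set <pf> vf 0 max_tx_rate 500
 */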
1579
Greg Rosede4c7f62011-09-29 05:57:33 +00001580int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
1581{
1582 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Greg Rosede4c7f62011-09-29 05:57:33 +00001583 struct ixgbe_hw *hw = &adapter->hw;
Greg Rosede4c7f62011-09-29 05:57:33 +00001584
Emil Tantilov600a5072014-10-16 15:49:02 +00001585 if (vf >= adapter->num_vfs)
1586 return -EINVAL;
1587
Greg Rosede4c7f62011-09-29 05:57:33 +00001588 adapter->vfinfo[vf].spoofchk_enabled = setting;
1589
Emil Tantilov77f192a2016-03-18 16:11:14 -07001590 /* configure MAC spoofing */
1591 hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);
Greg Rosede4c7f62011-09-29 05:57:33 +00001592
Emil Tantilov77f192a2016-03-18 16:11:14 -07001593 /* configure VLAN spoofing */
Emil Tantilovd3dec7c2016-03-18 16:11:19 -07001594 hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);
Emil Tantilov77f192a2016-03-18 16:11:14 -07001595
1596	/* Ensure the LLDP and FC EtherType filters are set up for EtherType
1597	 * anti-spoofing before set_ethertype_anti_spoofing is called below
1598 */
1599 if (hw->mac.ops.set_ethertype_anti_spoofing) {
1600 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
1601 (IXGBE_ETQF_FILTER_EN |
1602 IXGBE_ETQF_TX_ANTISPOOF |
1603 IXGBE_ETH_P_LLDP));
1604
1605 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
1606 (IXGBE_ETQF_FILTER_EN |
1607 IXGBE_ETQF_TX_ANTISPOOF |
1608 ETH_P_PAUSE));
1609
1610 hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
Greg Rosede4c7f62011-09-29 05:57:33 +00001611 }
1612
1613 return 0;
1614}
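/* With spoof checking on, transmit frames from the VF whose source MAC,
 * VLAN or (where supported) EtherType do not match what the PF
 * programmed are dropped instead of forwarded.  Toggled via
 * "ip link set <pf> vf 0 spoofchk on|off".
 */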
1615
Vlad Zolotarove65ce0d2015-03-30 21:35:24 +03001616int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
1617 bool setting)
1618{
1619 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1620
1621	/* This operation is currently supported only for 82599 and X540
1622 * devices.
1623 */
1624 if (adapter->hw.mac.type < ixgbe_mac_82599EB ||
1625 adapter->hw.mac.type >= ixgbe_mac_X550)
1626 return -EOPNOTSUPP;
1627
1628 if (vf >= adapter->num_vfs)
1629 return -EINVAL;
1630
1631 adapter->vfinfo[vf].rss_query_enabled = setting;
1632
1633 return 0;
1634}
1635
Hiroshi Shimamoto54011e42015-08-28 06:58:33 +00001636int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
1637{
1638 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1639
1640 if (vf >= adapter->num_vfs)
1641 return -EINVAL;
1642
1643 /* nothing to do */
1644 if (adapter->vfinfo[vf].trusted == setting)
1645 return 0;
1646
1647 adapter->vfinfo[vf].trusted = setting;
1648
1649 /* reset VF to reconfigure features */
1650 adapter->vfinfo[vf].clear_to_send = false;
1651 ixgbe_ping_vf(adapter, vf);
1652
1653 e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");
1654
1655 return 0;
1656}
1657
Greg Rose7f016482010-05-04 22:12:06 +00001658int ixgbe_ndo_get_vf_config(struct net_device *netdev,
1659 int vf, struct ifla_vf_info *ivi)
1660{
1661 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1662 if (vf >= adapter->num_vfs)
1663 return -EINVAL;
1664 ivi->vf = vf;
1665 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001666 ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
1667 ivi->min_tx_rate = 0;
Greg Rose7f016482010-05-04 22:12:06 +00001668 ivi->vlan = adapter->vfinfo[vf].pf_vlan;
1669 ivi->qos = adapter->vfinfo[vf].pf_qos;
Greg Rosede4c7f62011-09-29 05:57:33 +00001670 ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
Vlad Zolotarove65ce0d2015-03-30 21:35:24 +03001671 ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
Hiroshi Shimamoto54011e42015-08-28 06:58:33 +00001672 ivi->trusted = adapter->vfinfo[vf].trusted;
Greg Rose7f016482010-05-04 22:12:06 +00001673 return 0;
1674}
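/* The ifla_vf_info filled in here is what rtnetlink hands back for
 * "ip link show <pf>": one "vf N" line per VF reporting MAC, VLAN/QoS,
 * rate limits, spoof checking, RSS query and trust state.
 */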