Jeff Kirsher51dce242018-04-26 08:08:09 -07001// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 1999 - 2018 Intel Corporation. */
Greg Rose17367272010-01-09 02:25:48 +00003
Greg Rose17367272010-01-09 02:25:48 +00004#include <linux/types.h>
5#include <linux/module.h>
6#include <linux/pci.h>
7#include <linux/netdevice.h>
8#include <linux/vmalloc.h>
9#include <linux/string.h>
10#include <linux/in.h>
11#include <linux/ip.h>
12#include <linux/tcp.h>
13#include <linux/ipv6.h>
Don Skidmoreaa2bacb2015-04-09 22:03:22 -070014#include <linux/if_bridge.h>
Patrick McHardyf6469682013-04-19 02:04:27 +000015#ifdef NETIF_F_HW_VLAN_CTAG_TX
Greg Rose17367272010-01-09 02:25:48 +000016#include <linux/if_vlan.h>
17#endif
18
19#include "ixgbe.h"
Greg Rosec6bda302011-08-24 02:37:55 +000020#include "ixgbe_type.h"
Greg Rose17367272010-01-09 02:25:48 +000021#include "ixgbe_sriov.h"
22
Greg Rosec6bda302011-08-24 02:37:55 +000023#ifdef CONFIG_PCI_IOV
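/* Build a free list from the RAR entries that are not reserved for the PF
 * or the VF MAC addresses; these are handed out when VFs request MACVLAN
 * filters.  Failure to allocate the list is not fatal - later VF MACVLAN
 * requests are simply refused with -ENOSPC.
 */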
Emil Tantilov5c11f002017-01-20 14:11:56 -080024static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
25 unsigned int num_vfs)
Emil Tantilov2bc09722017-01-20 14:11:45 -080026{
27 struct ixgbe_hw *hw = &adapter->hw;
28 struct vf_macvlans *mv_list;
29 int num_vf_macvlans, i;
30
31 num_vf_macvlans = hw->mac.num_rar_entries -
Emil Tantilov5c11f002017-01-20 14:11:56 -080032 (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
Emil Tantilov2bc09722017-01-20 14:11:45 -080033 if (!num_vf_macvlans)
34 return;
35
36 mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
37 GFP_KERNEL);
38 if (mv_list) {
39 /* Initialize list of VF macvlans */
40 INIT_LIST_HEAD(&adapter->vf_mvs.l);
41 for (i = 0; i < num_vf_macvlans; i++) {
42 mv_list[i].vf = -1;
43 mv_list[i].free = true;
44 list_add(&mv_list[i].l, &adapter->vf_mvs.l);
45 }
46 adapter->mv_list = mv_list;
47 }
48}
49
Emil Tantilov5c11f002017-01-20 14:11:56 -080050static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
51 unsigned int num_vfs)
Greg Rosec6bda302011-08-24 02:37:55 +000052{
53 struct ixgbe_hw *hw = &adapter->hw;
Emil Tantilovda614d02017-01-20 14:11:50 -080054 int i;
Greg Rosec6bda302011-08-24 02:37:55 +000055
Alexander Duyck73079ea2012-07-14 06:48:49 +000056 /* Enable VMDq flag so device will be set in VM mode */
Alexander Duycka8e87d92017-11-22 10:57:05 -080057 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
58 IXGBE_FLAG_VMDQ_ENABLED;
Alexander Duyck73079ea2012-07-14 06:48:49 +000059
Emil Tantilov5c11f002017-01-20 14:11:56 -080060 /* Allocate memory for per VF control structures */
61 adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
62 GFP_KERNEL);
Emil Tantilovda614d02017-01-20 14:11:50 -080063 if (!adapter->vfinfo)
64 return -ENOMEM;
65
Emil Tantilov5c11f002017-01-20 14:11:56 -080066 adapter->num_vfs = num_vfs;
67
68 ixgbe_alloc_vf_macvlans(adapter, num_vfs);
69 adapter->ring_feature[RING_F_VMDQ].offset = num_vfs;
Emil Tantilovda614d02017-01-20 14:11:50 -080070
John Fastabend815cccb2012-10-24 08:13:09 +000071 /* Initialize default switching mode VEB */
72 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
Don Skidmoreaa2bacb2015-04-09 22:03:22 -070073 adapter->bridge_mode = BRIDGE_MODE_VEB;
John Fastabend815cccb2012-10-24 08:13:09 +000074
Emil Tantilovda614d02017-01-20 14:11:50 -080075	/* limit traffic classes based on VFs enabled */
Emil Tantilov5c11f002017-01-20 14:11:56 -080076 if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
Emil Tantilovda614d02017-01-20 14:11:50 -080077 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
78 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
Emil Tantilov5c11f002017-01-20 14:11:56 -080079 } else if (num_vfs < 32) {
Emil Tantilovda614d02017-01-20 14:11:50 -080080 adapter->dcb_cfg.num_tcs.pg_tcs = 4;
81 adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
82 } else {
83 adapter->dcb_cfg.num_tcs.pg_tcs = 1;
84 adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
Greg Rosec6bda302011-08-24 02:37:55 +000085 }
86
Emil Tantilovda614d02017-01-20 14:11:50 -080087 /* Disable RSC when in SR-IOV mode */
88 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
89 IXGBE_FLAG2_RSC_ENABLED);
90
Emil Tantilov5c11f002017-01-20 14:11:56 -080091 for (i = 0; i < num_vfs; i++) {
Emil Tantilovda614d02017-01-20 14:11:50 -080092 /* enable spoof checking for all VFs */
93 adapter->vfinfo[i].spoofchk_enabled = true;
94
95 /* We support VF RSS querying only for 82599 and x540
96 * devices at the moment. These devices share RSS
97 * indirection table and RSS hash key with PF therefore
98 * we want to disable the querying by default.
99 */
100 adapter->vfinfo[i].rss_query_enabled = 0;
101
102 /* Untrust all VFs */
103 adapter->vfinfo[i].trusted = false;
104
105 /* set the default xcast mode */
106 adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
107 }
108
Emil Tantilov5c11f002017-01-20 14:11:56 -0800109 e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs);
Emil Tantilovda614d02017-01-20 14:11:50 -0800110 return 0;
Greg Rose66dcfd72012-12-11 08:26:38 +0000111}
112
Mark Rustad988d1302015-10-30 15:29:34 -0700113/**
114 * ixgbe_get_vfs - Find and take references to all vf devices
115 * @adapter: Pointer to adapter struct
116 */
117static void ixgbe_get_vfs(struct ixgbe_adapter *adapter)
118{
119 struct pci_dev *pdev = adapter->pdev;
120 u16 vendor = pdev->vendor;
121 struct pci_dev *vfdev;
122 int vf = 0;
123 u16 vf_id;
124 int pos;
125
126 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
127 if (!pos)
128 return;
129 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
130
131 vfdev = pci_get_device(vendor, vf_id, NULL);
132 for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) {
133 if (!vfdev->is_virtfn)
134 continue;
135 if (vfdev->physfn != pdev)
136 continue;
137 if (vf >= adapter->num_vfs)
138 continue;
139 pci_dev_get(vfdev);
140 adapter->vfinfo[vf].vfdev = vfdev;
141 ++vf;
142 }
143}
144
Greg Rose66dcfd72012-12-11 08:26:38 +0000145/* Note this function is called when the user wants to enable SR-IOV
146 * VFs using the now deprecated module parameter
147 */
Emil Tantilov5c11f002017-01-20 14:11:56 -0800148void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
Greg Rose66dcfd72012-12-11 08:26:38 +0000149{
150 int pre_existing_vfs = 0;
Emil Tantilov5c11f002017-01-20 14:11:56 -0800151 unsigned int num_vfs;
Greg Rose66dcfd72012-12-11 08:26:38 +0000152
153 pre_existing_vfs = pci_num_vf(adapter->pdev);
Emil Tantilov5c11f002017-01-20 14:11:56 -0800154 if (!pre_existing_vfs && !max_vfs)
Greg Rose66dcfd72012-12-11 08:26:38 +0000155 return;
156
Greg Rose66dcfd72012-12-11 08:26:38 +0000157 /* If there are pre-existing VFs then we have to force
158 * use of that many - over ride any module parameter value.
159 * This may result from the user unloading the PF driver
160 * while VFs were assigned to guest VMs or because the VFs
161 * have been created via the new PCI SR-IOV sysfs interface.
162 */
163 if (pre_existing_vfs) {
Emil Tantilov5c11f002017-01-20 14:11:56 -0800164 num_vfs = pre_existing_vfs;
Greg Rose66dcfd72012-12-11 08:26:38 +0000165 dev_warn(&adapter->pdev->dev,
166 "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
167 } else {
168 int err;
169 /*
170 * The 82599 supports up to 64 VFs per physical function
171 * but this implementation limits allocation to 63 so that
172 * basic networking resources are still available to the
Joe Perchesdbedd442015-03-06 20:49:12 -0800173		 * physical function. If the user requests more than
Greg Rose66dcfd72012-12-11 08:26:38 +0000174 * 63 VFs then it is an error - reset to default of zero.
175 */
Emil Tantilov5c11f002017-01-20 14:11:56 -0800176 num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
Greg Rose66dcfd72012-12-11 08:26:38 +0000177
Emil Tantilov5c11f002017-01-20 14:11:56 -0800178 err = pci_enable_sriov(adapter->pdev, num_vfs);
Greg Rose66dcfd72012-12-11 08:26:38 +0000179 if (err) {
180 e_err(probe, "Failed to enable PCI sriov: %d\n", err);
Greg Rose66dcfd72012-12-11 08:26:38 +0000181 return;
182 }
183 }
184
Emil Tantilov5c11f002017-01-20 14:11:56 -0800185 if (!__ixgbe_enable_sriov(adapter, num_vfs)) {
Mark Rustad988d1302015-10-30 15:29:34 -0700186 ixgbe_get_vfs(adapter);
Greg Rose66dcfd72012-12-11 08:26:38 +0000187 return;
Mark Rustad988d1302015-10-30 15:29:34 -0700188 }
Greg Rose66dcfd72012-12-11 08:26:38 +0000189
190 /* If we have gotten to this point then there is no memory available
191 * to manage the VF devices - print message and bail.
192 */
Greg Rosec6bda302011-08-24 02:37:55 +0000193 e_err(probe, "Unable to allocate memory for VF Data Storage - "
194 "SRIOV disabled\n");
Alexander Duyck99d74482012-05-09 08:09:25 +0000195 ixgbe_disable_sriov(adapter);
Greg Rosec6bda302011-08-24 02:37:55 +0000196}
Greg Rosec6bda302011-08-24 02:37:55 +0000197
Alexander Duyck92971272012-05-23 02:58:40 +0000198#endif /* #ifdef CONFIG_PCI_IOV */
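/* Release the PF's references to the VF devices and free the per-VF state.
 * SR-IOV is only disabled in hardware if no VFs are currently assigned to
 * guests; otherwise it is left available but no longer managed.
 */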
Greg Roseda36b642012-12-11 08:26:43 +0000199int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
Greg Rosec6bda302011-08-24 02:37:55 +0000200{
Mark Rustad988d1302015-10-30 15:29:34 -0700201 unsigned int num_vfs = adapter->num_vfs, vf;
Greg Roseda36b642012-12-11 08:26:43 +0000202 int rss;
Greg Rosec6bda302011-08-24 02:37:55 +0000203
Alexander Duyckd773d132012-05-05 05:32:26 +0000204 /* set num VFs to 0 to prevent access to vfinfo */
205 adapter->num_vfs = 0;
206
Mark Rustad988d1302015-10-30 15:29:34 -0700207	/* put the references to all of the VF devices */
208 for (vf = 0; vf < num_vfs; ++vf) {
209 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
210
211 if (!vfdev)
212 continue;
213 adapter->vfinfo[vf].vfdev = NULL;
214 pci_dev_put(vfdev);
215 }
216
Alexander Duyckd773d132012-05-05 05:32:26 +0000217 /* free VF control structures */
218 kfree(adapter->vfinfo);
219 adapter->vfinfo = NULL;
220
221 /* free macvlan list */
222 kfree(adapter->mv_list);
223 adapter->mv_list = NULL;
224
Alexander Duyck99d74482012-05-09 08:09:25 +0000225 /* if SR-IOV is already disabled then there is nothing to do */
226 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
Greg Roseda36b642012-12-11 08:26:43 +0000227 return 0;
Alexander Duyck99d74482012-05-09 08:09:25 +0000228
Greg Rosec6bda302011-08-24 02:37:55 +0000229#ifdef CONFIG_PCI_IOV
Alexander Duyck92971272012-05-23 02:58:40 +0000230 /*
231 * If our VFs are assigned we cannot shut down SR-IOV
232 * without causing issues, so just leave the hardware
233 * available but disabled
234 */
Alexander Duycke507d0c2013-03-26 00:03:21 +0000235 if (pci_vfs_assigned(adapter->pdev)) {
Alexander Duyck92971272012-05-23 02:58:40 +0000236 e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
Greg Roseda36b642012-12-11 08:26:43 +0000237 return -EPERM;
David S. Millerd47e12d2012-07-22 12:36:41 -0700238 }
Greg Rosec6bda302011-08-24 02:37:55 +0000239 /* disable iov and allow time for transactions to clear */
240 pci_disable_sriov(adapter->pdev);
241#endif
242
Alexander Duyck1d9c0bf2012-05-05 05:32:21 +0000243 /* Disable VMDq flag so device will be set in VM mode */
Alexander Duyck8315ef62018-04-03 17:16:45 -0400244 if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) {
Alexander Duyck1d9c0bf2012-05-05 05:32:21 +0000245 adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
John Fastabend2a47fa42013-11-06 09:54:52 -0800246 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
Don Skidmore0f9b2322014-11-18 09:35:08 +0000247 rss = min_t(int, ixgbe_max_rss_indices(adapter),
248 num_online_cpus());
John Fastabend2a47fa42013-11-06 09:54:52 -0800249 } else {
250 rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
251 }
Alexander Duyck1d9c0bf2012-05-05 05:32:21 +0000252
John Fastabend2a47fa42013-11-06 09:54:52 -0800253 adapter->ring_feature[RING_F_VMDQ].offset = 0;
Greg Roseda36b642012-12-11 08:26:43 +0000254 adapter->ring_feature[RING_F_RSS].limit = rss;
255
Greg Rosec6bda302011-08-24 02:37:55 +0000256 /* take a breather then clean up driver data */
257 msleep(100);
Greg Roseda36b642012-12-11 08:26:43 +0000258 return 0;
259}
260
261static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
262{
263#ifdef CONFIG_PCI_IOV
264 struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
Greg Roseda36b642012-12-11 08:26:43 +0000265 int pre_existing_vfs = pci_num_vf(dev);
Alexander Duyck4e039c12017-11-22 10:56:40 -0800266 int err = 0, num_rx_pools, i, limit;
267 u8 num_tc;
Greg Roseda36b642012-12-11 08:26:43 +0000268
269 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
270 err = ixgbe_disable_sriov(adapter);
271 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
Mark Rustade90dd262014-07-22 06:51:08 +0000272 return num_vfs;
Greg Roseda36b642012-12-11 08:26:43 +0000273
274 if (err)
Mark Rustade90dd262014-07-22 06:51:08 +0000275 return err;
Greg Roseda36b642012-12-11 08:26:43 +0000276
Jacob Kelleraac2f1b2014-08-21 06:17:59 +0000277 /* While the SR-IOV capability structure reports total VFs to be 64,
Usha Ketinenib5d8acb2016-12-23 10:08:14 -0800278 * we limit the actual number allocated as below based on two factors.
279 * Num_TCs MAX_VFs
280 * 1 63
281 * <=4 31
282 * >4 15
Jacob Kelleraac2f1b2014-08-21 06:17:59 +0000283 * First, we reserve some transmit/receive resources for the PF.
284 * Second, VMDQ also uses the same pools that SR-IOV does. We need to
285 * account for this, so that we don't accidentally allocate more VFs
286 * than we have available pools. The PCI bus driver already checks for
287 * other values out of range.
Greg Roseda36b642012-12-11 08:26:43 +0000288 */
Alexander Duyck0efbf122017-11-22 10:57:11 -0800289 num_tc = adapter->hw_tcs;
Alexander Duyck8315ef62018-04-03 17:16:45 -0400290 num_rx_pools = bitmap_weight(adapter->fwd_bitmask,
291 adapter->num_rx_pools);
Alexander Duyck4e039c12017-11-22 10:56:40 -0800292 limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
293 (num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;
Greg Roseda36b642012-12-11 08:26:43 +0000294
Alexander Duyck4e039c12017-11-22 10:56:40 -0800295 if (num_vfs > (limit - num_rx_pools)) {
296 e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n",
297 num_tc, num_rx_pools - 1, limit - num_rx_pools);
298 return -EPERM;
Usha Ketinenib5d8acb2016-12-23 10:08:14 -0800299 }
Greg Roseda36b642012-12-11 08:26:43 +0000300
Emil Tantilov5c11f002017-01-20 14:11:56 -0800301 err = __ixgbe_enable_sriov(adapter, num_vfs);
Greg Roseda36b642012-12-11 08:26:43 +0000302 if (err)
Mark Rustade90dd262014-07-22 06:51:08 +0000303 return err;
Greg Roseda36b642012-12-11 08:26:43 +0000304
Emil Tantilov5c11f002017-01-20 14:11:56 -0800305 for (i = 0; i < num_vfs; i++)
Greg Roseda36b642012-12-11 08:26:43 +0000306 ixgbe_vf_configuration(dev, (i | 0x10000000));
307
Emil Tantilov0c339bf2016-09-09 12:59:10 -0700308 /* reset before enabling SRIOV to avoid mailbox issues */
309 ixgbe_sriov_reinit(adapter);
310
Greg Roseda36b642012-12-11 08:26:43 +0000311 err = pci_enable_sriov(dev, num_vfs);
312 if (err) {
313 e_dev_warn("Failed to enable PCI sriov: %d\n", err);
Mark Rustade90dd262014-07-22 06:51:08 +0000314 return err;
Greg Roseda36b642012-12-11 08:26:43 +0000315 }
Mark Rustad988d1302015-10-30 15:29:34 -0700316 ixgbe_get_vfs(adapter);
Greg Roseda36b642012-12-11 08:26:43 +0000317
Greg Roseda36b642012-12-11 08:26:43 +0000318 return num_vfs;
Mark Rustade90dd262014-07-22 06:51:08 +0000319#else
Greg Roseda36b642012-12-11 08:26:43 +0000320 return 0;
Mark Rustade90dd262014-07-22 06:51:08 +0000321#endif
Greg Roseda36b642012-12-11 08:26:43 +0000322}
323
324static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
325{
326 struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
327 int err;
Don Skidmore8f48f5b2013-11-22 04:27:23 +0000328#ifdef CONFIG_PCI_IOV
Greg Roseda36b642012-12-11 08:26:43 +0000329 u32 current_flags = adapter->flags;
Alexander Duyck2097db72017-11-22 10:56:22 -0800330 int prev_num_vf = pci_num_vf(dev);
Don Skidmore8f48f5b2013-11-22 04:27:23 +0000331#endif
Greg Roseda36b642012-12-11 08:26:43 +0000332
333 err = ixgbe_disable_sriov(adapter);
334
335 /* Only reinit if no error and state changed */
Greg Roseda36b642012-12-11 08:26:43 +0000336#ifdef CONFIG_PCI_IOV
Alexander Duyck2097db72017-11-22 10:56:22 -0800337 if (!err && (current_flags != adapter->flags ||
338 prev_num_vf != pci_num_vf(dev)))
Greg Roseda36b642012-12-11 08:26:43 +0000339 ixgbe_sriov_reinit(adapter);
340#endif
Greg Roseda36b642012-12-11 08:26:43 +0000341
342 return err;
343}
344
345int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
346{
347 if (num_vfs == 0)
348 return ixgbe_pci_sriov_disable(dev);
349 else
350 return ixgbe_pci_sriov_enable(dev, num_vfs);
Greg Rosec6bda302011-08-24 02:37:55 +0000351}
352
Emil Tantilov5d5b7c32010-10-12 22:20:59 +0000353static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
Alexander Duyck58a02be2012-07-20 08:09:17 +0000354 u32 *msgbuf, u32 vf)
Greg Rose17367272010-01-09 02:25:48 +0000355{
Alexander Duyck58a02be2012-07-20 08:09:17 +0000356 int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
357 >> IXGBE_VT_MSGINFO_SHIFT;
358 u16 *hash_list = (u16 *)&msgbuf[1];
Greg Rose17367272010-01-09 02:25:48 +0000359 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
Greg Rose8a07a222010-05-05 19:57:30 +0000360 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose17367272010-01-09 02:25:48 +0000361 int i;
Greg Rose8a07a222010-05-05 19:57:30 +0000362 u32 vector_bit;
363 u32 vector_reg;
364 u32 mta_reg;
Jacob Kellerb335e752014-03-25 07:45:27 +0000365 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
Greg Rose17367272010-01-09 02:25:48 +0000366
367 /* only so many hash values supported */
368 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
369
370 /*
 371	 * salt away the number of multicast addresses assigned
 372	 * to this VF for later use to restore when the PF multicast
373 * list changes
374 */
375 vfinfo->num_vf_mc_hashes = entries;
376
377 /*
378 * VFs are limited to using the MTA hash table for their multicast
379 * addresses
380 */
381 for (i = 0; i < entries; i++) {
Joe Perchese81a1ba2010-11-14 17:04:33 +0000382 vfinfo->vf_mc_hashes[i] = hash_list[i];
Greg Rose17367272010-01-09 02:25:48 +0000383 }
384
Greg Rose8a07a222010-05-05 19:57:30 +0000385 for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
386 vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
387 vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
388 mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
Jacob Kellerb4f47a42016-04-13 16:08:22 -0700389 mta_reg |= BIT(vector_bit);
Greg Rose8a07a222010-05-05 19:57:30 +0000390 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
391 }
Jacob Kellerb335e752014-03-25 07:45:27 +0000392 vmolr |= IXGBE_VMOLR_ROMPE;
393 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
Greg Rose17367272010-01-09 02:25:48 +0000394
395 return 0;
396}
397
Jacob Kellerb335e752014-03-25 07:45:27 +0000398#ifdef CONFIG_PCI_IOV
Greg Rose17367272010-01-09 02:25:48 +0000399void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
400{
401 struct ixgbe_hw *hw = &adapter->hw;
402 struct vf_data_storage *vfinfo;
403 int i, j;
404 u32 vector_bit;
405 u32 vector_reg;
406 u32 mta_reg;
407
408 for (i = 0; i < adapter->num_vfs; i++) {
Jacob Kellerb335e752014-03-25 07:45:27 +0000409 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
Greg Rose17367272010-01-09 02:25:48 +0000410 vfinfo = &adapter->vfinfo[i];
411 for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
412 hw->addr_ctrl.mta_in_use++;
413 vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
414 vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
415 mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
Jacob Kellerb4f47a42016-04-13 16:08:22 -0700416 mta_reg |= BIT(vector_bit);
Greg Rose17367272010-01-09 02:25:48 +0000417 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
418 }
Jacob Kellerb335e752014-03-25 07:45:27 +0000419
420 if (vfinfo->num_vf_mc_hashes)
421 vmolr |= IXGBE_VMOLR_ROMPE;
422 else
423 vmolr &= ~IXGBE_VMOLR_ROMPE;
424 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
Greg Rose17367272010-01-09 02:25:48 +0000425 }
Greg Rosea1cbb15c2011-05-13 01:33:48 +0000426
427 /* Restore any VF macvlans */
Jacob Keller5d7daa32014-03-29 06:51:25 +0000428 ixgbe_full_sync_mac_table(adapter);
Greg Rose17367272010-01-09 02:25:48 +0000429}
Jacob Kellerb335e752014-03-25 07:45:27 +0000430#endif
Greg Rose17367272010-01-09 02:25:48 +0000431
Emil Tantilov5d5b7c32010-10-12 22:20:59 +0000432static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
433 u32 vf)
Greg Rose17367272010-01-09 02:25:48 +0000434{
Alexander Duyckb6488b62015-11-02 17:10:01 -0800435 struct ixgbe_hw *hw = &adapter->hw;
436 int err;
437
Alexander Duyckb6488b62015-11-02 17:10:01 -0800438 /* If VLAN overlaps with one the PF is currently monitoring make
439 * sure that we are able to allocate a VLVF entry. This may be
440 * redundant but it guarantees PF will maintain visibility to
441 * the VLAN.
442 */
443 if (add && test_bit(vid, adapter->active_vlans)) {
444 err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false);
445 if (err)
446 return err;
447 }
448
449 err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false);
450
Alexander Duycke1d0a2a2015-11-02 17:10:19 -0800451 if (add && !err)
452 return err;
453
454 /* If we failed to add the VF VLAN or we are removing the VF VLAN
455 * we may need to drop the PF pool bit in order to allow us to free
456 * up the VLVF resources.
457 */
458 if (test_bit(vid, adapter->active_vlans) ||
459 (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
460 ixgbe_update_pf_promisc_vlvf(adapter, vid);
461
Alexander Duyckb6488b62015-11-02 17:10:01 -0800462 return err;
Greg Rose17367272010-01-09 02:25:48 +0000463}
464
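/* Handle the IXGBE_VF_SET_LPE mailbox request: validate the VF's requested
 * maximum frame size, disable the VF's Rx path on 82599 when jumbo and
 * non-jumbo frames would otherwise be mixed, and grow the global MAXFRS
 * register if the new size exceeds it.
 */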
Alexander Duyck872844d2012-08-15 02:10:43 +0000465static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
Greg Rosee9f98072011-01-26 01:06:07 +0000466{
467 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck872844d2012-08-15 02:10:43 +0000468 int max_frame = msgbuf[1];
Greg Rosee9f98072011-01-26 01:06:07 +0000469 u32 max_frs;
Greg Rosee9f98072011-01-26 01:06:07 +0000470
Alexander Duyck872844d2012-08-15 02:10:43 +0000471 /*
472 * For 82599EB we have to keep all PFs and VFs operating with
473 * the same max_frame value in order to avoid sending an oversize
474 * frame to a VF. In order to guarantee this is handled correctly
475 * for all cases we have several special exceptions to take into
476 * account before we can enable the VF for receive
477 */
478 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
479 struct net_device *dev = adapter->netdev;
480 int pf_max_frame = dev->mtu + ETH_HLEN;
481 u32 reg_offset, vf_shift, vfre;
482 s32 err = 0;
Greg Rosee9f98072011-01-26 01:06:07 +0000483
Alexander Duyck872844d2012-08-15 02:10:43 +0000484#ifdef CONFIG_FCOE
485 if (dev->features & NETIF_F_FCOE_MTU)
486 pf_max_frame = max_t(int, pf_max_frame,
487 IXGBE_FCOE_JUMBO_FRAME_SIZE);
488
489#endif /* CONFIG_FCOE */
Alexander Duyckbffb3bc2012-07-20 08:09:37 +0000490 switch (adapter->vfinfo[vf].vf_api) {
491 case ixgbe_mbox_api_11:
Vlad Zolotarov4ce37a42015-04-01 11:24:54 +0300492 case ixgbe_mbox_api_12:
Don Skidmore07eea572016-12-15 21:18:32 -0500493 case ixgbe_mbox_api_13:
Tony Nguyen93df9462017-05-31 04:43:47 -0700494 /* Version 1.1 supports jumbo frames on VFs if PF has
Alexander Duyckbffb3bc2012-07-20 08:09:37 +0000495 * jumbo frames enabled which means legacy VFs are
496 * disabled
497 */
498 if (pf_max_frame > ETH_FRAME_LEN)
499 break;
Tony Nguyen93df9462017-05-31 04:43:47 -0700500 /* fall through */
Alexander Duyckbffb3bc2012-07-20 08:09:37 +0000501 default:
Tony Nguyen93df9462017-05-31 04:43:47 -0700502 /* If the PF or VF are running w/ jumbo frames enabled
Alexander Duyckbffb3bc2012-07-20 08:09:37 +0000503 * we need to shut down the VF Rx path as we cannot
504 * support jumbo frames on legacy VFs
505 */
506 if ((pf_max_frame > ETH_FRAME_LEN) ||
507 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
508 err = -EINVAL;
509 break;
510 }
Alexander Duyck872844d2012-08-15 02:10:43 +0000511
512 /* determine VF receive enable location */
513 vf_shift = vf % 32;
514 reg_offset = vf / 32;
515
516 /* enable or disable receive depending on error */
517 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
518 if (err)
Jacob Kellerb4f47a42016-04-13 16:08:22 -0700519 vfre &= ~BIT(vf_shift);
Alexander Duyck872844d2012-08-15 02:10:43 +0000520 else
Jacob Kellerb4f47a42016-04-13 16:08:22 -0700521 vfre |= BIT(vf_shift);
Alexander Duyck872844d2012-08-15 02:10:43 +0000522 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
523
524 if (err) {
525 e_err(drv, "VF max_frame %d out of range\n", max_frame);
526 return err;
527 }
Greg Rosee9f98072011-01-26 01:06:07 +0000528 }
529
Alexander Duyck872844d2012-08-15 02:10:43 +0000530 /* MTU < 68 is an error and causes problems on some kernels */
531 if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
532 e_err(drv, "VF max_frame %d out of range\n", max_frame);
533 return -EINVAL;
534 }
535
536 /* pull current max frame size from hardware */
537 max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
538 max_frs &= IXGBE_MHADD_MFS_MASK;
539 max_frs >>= IXGBE_MHADD_MFS_SHIFT;
540
541 if (max_frs < max_frame) {
542 max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
Greg Rosee9f98072011-01-26 01:06:07 +0000543 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
544 }
545
Alexander Duyck872844d2012-08-15 02:10:43 +0000546 e_info(hw, "VF requests change max MTU to %d\n", max_frame);
547
548 return 0;
Greg Rosee9f98072011-01-26 01:06:07 +0000549}
550
Emil Tantilov5d5b7c32010-10-12 22:20:59 +0000551static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
Greg Rose17367272010-01-09 02:25:48 +0000552{
553 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
Jacob Kellerb335e752014-03-25 07:45:27 +0000554 vmolr |= IXGBE_VMOLR_BAM;
Greg Rosef0412772010-05-04 22:11:46 +0000555 if (aupe)
556 vmolr |= IXGBE_VMOLR_AUPE;
557 else
558 vmolr &= ~IXGBE_VMOLR_AUPE;
Greg Rose17367272010-01-09 02:25:48 +0000559 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
560}
561
Alexander Duyck107d3012012-10-02 00:17:03 +0000562static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
Greg Rose7f016482010-05-04 22:12:06 +0000563{
564 struct ixgbe_hw *hw = &adapter->hw;
565
Alexander Duyck107d3012012-10-02 00:17:03 +0000566 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
Greg Rose7f016482010-05-04 22:12:06 +0000567}
Alexander Duyck4c7f35f2015-11-02 17:10:32 -0800568
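/* Walk the VLVF/VLVFB pool membership registers and drop this VF from every
 * VLAN filter it joined.  The shared VLVF entry (and, when the PF is not
 * using the VLAN either, the VFTA bit) is only cleared once no other pool
 * remains on that VLAN.
 */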
569static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
570{
571 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck18be4fc2016-01-06 22:48:44 -0800572 u32 vlvfb_mask, pool_mask, i;
573
574 /* create mask for VF and other pools */
Jacob Kellerb4f47a42016-04-13 16:08:22 -0700575 pool_mask = ~BIT(VMDQ_P(0) % 32);
576 vlvfb_mask = BIT(vf % 32);
Alexander Duyck4c7f35f2015-11-02 17:10:32 -0800577
578 /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */
579 for (i = IXGBE_VLVF_ENTRIES; i--;) {
Alexander Duyck4c7f35f2015-11-02 17:10:32 -0800580 u32 bits[2], vlvfb, vid, vfta, vlvf;
Alexander Duyckab3a3b72015-12-23 09:00:35 -0800581 u32 word = i * 2 + vf / 32;
Alexander Duyck18be4fc2016-01-06 22:48:44 -0800582 u32 mask;
Alexander Duyck4c7f35f2015-11-02 17:10:32 -0800583
Alexander Duyckab3a3b72015-12-23 09:00:35 -0800584 vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
Alexander Duyck4c7f35f2015-11-02 17:10:32 -0800585
586 /* if our bit isn't set we can skip it */
Alexander Duyck18be4fc2016-01-06 22:48:44 -0800587 if (!(vlvfb & vlvfb_mask))
Alexander Duyck4c7f35f2015-11-02 17:10:32 -0800588 continue;
589
590 /* clear our bit from vlvfb */
Alexander Duyck18be4fc2016-01-06 22:48:44 -0800591 vlvfb ^= vlvfb_mask;
Alexander Duyck4c7f35f2015-11-02 17:10:32 -0800592
 593		/* create 64b mask to check whether we should clear VLVF */
594 bits[word % 2] = vlvfb;
Alexander Duyckab3a3b72015-12-23 09:00:35 -0800595 bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));
Alexander Duyck4c7f35f2015-11-02 17:10:32 -0800596
Alexander Duyck4c7f35f2015-11-02 17:10:32 -0800597 /* if other pools are present, just remove ourselves */
Alexander Duyck18be4fc2016-01-06 22:48:44 -0800598 if (bits[(VMDQ_P(0) / 32) ^ 1] ||
599 (bits[VMDQ_P(0) / 32] & pool_mask))
Alexander Duyck4c7f35f2015-11-02 17:10:32 -0800600 goto update_vlvfb;
601
Alexander Duyck18be4fc2016-01-06 22:48:44 -0800602 /* if PF is present, leave VFTA */
603 if (bits[0] || bits[1])
604 goto update_vlvf;
605
Alexander Duyck4c7f35f2015-11-02 17:10:32 -0800606 /* if we cannot determine VLAN just remove ourselves */
607 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
608 if (!vlvf)
609 goto update_vlvfb;
610
611 vid = vlvf & VLAN_VID_MASK;
Jacob Kellerb4f47a42016-04-13 16:08:22 -0700612 mask = BIT(vid % 32);
Alexander Duyck4c7f35f2015-11-02 17:10:32 -0800613
614 /* clear bit from VFTA */
615 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
616 if (vfta & mask)
617 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask);
618update_vlvf:
619 /* clear POOL selection enable */
620 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0);
Alexander Duyck18be4fc2016-01-06 22:48:44 -0800621
622 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
623 vlvfb = 0;
Alexander Duyck4c7f35f2015-11-02 17:10:32 -0800624update_vlvfb:
625 /* clear pool bits */
626 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb);
627 }
628}
629
Greg Rosea1cbb15c2011-05-13 01:33:48 +0000630static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
631 int vf, int index, unsigned char *mac_addr)
632{
Greg Rosea1cbb15c2011-05-13 01:33:48 +0000633 struct vf_macvlans *entry;
Tony Nguyen0e1ff3062017-07-19 15:00:26 -0700634 struct list_head *pos;
635 int retval = 0;
Greg Rosea1cbb15c2011-05-13 01:33:48 +0000636
637 if (index <= 1) {
638 list_for_each(pos, &adapter->vf_mvs.l) {
639 entry = list_entry(pos, struct vf_macvlans, l);
640 if (entry->vf == vf) {
641 entry->vf = -1;
642 entry->free = true;
643 entry->is_macvlan = false;
Jacob Keller5d7daa32014-03-29 06:51:25 +0000644 ixgbe_del_mac_filter(adapter,
645 entry->vf_macvlan, vf);
Greg Rosea1cbb15c2011-05-13 01:33:48 +0000646 }
647 }
648 }
649
650 /*
651 * If index was zero then we were asked to clear the uc list
652 * for the VF. We're done.
653 */
654 if (!index)
655 return 0;
656
657 entry = NULL;
658
659 list_for_each(pos, &adapter->vf_mvs.l) {
660 entry = list_entry(pos, struct vf_macvlans, l);
661 if (entry->free)
662 break;
663 }
664
665 /*
666 * If we traversed the entire list and didn't find a free entry
667 * then we're out of space on the RAR table. Also entry may
668 * be NULL because the original memory allocation for the list
669 * failed, which is not fatal but does mean we can't support
670 * VF requests for MACVLAN because we couldn't allocate
671 * memory for the list management required.
672 */
673 if (!entry || !entry->free)
674 return -ENOSPC;
675
Tony Nguyen0e1ff3062017-07-19 15:00:26 -0700676 retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
677 if (retval < 0)
678 return retval;
679
Greg Rosea1cbb15c2011-05-13 01:33:48 +0000680 entry->free = false;
681 entry->is_macvlan = true;
682 entry->vf = vf;
683 memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
684
Greg Rosea1cbb15c2011-05-13 01:33:48 +0000685 return 0;
686}
687
Emil Tantilove251ecf2017-03-28 11:27:54 -0700688static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
689{
690 struct ixgbe_hw *hw = &adapter->hw;
691 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
Alexander Duyck0efbf122017-11-22 10:57:11 -0800692 u8 num_tcs = adapter->hw_tcs;
Emil Tantilove251ecf2017-03-28 11:27:54 -0700693
694 /* remove VLAN filters beloning to this VF */
695 ixgbe_clear_vf_vlans(adapter, vf);
696
697 /* add back PF assigned VLAN or VLAN 0 */
698 ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);
699
700 /* reset offloads to defaults */
701 ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);
702
703 /* set outgoing tags for VFs */
704 if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
705 ixgbe_clear_vmvir(adapter, vf);
706 } else {
707 if (vfinfo->pf_qos || !num_tcs)
708 ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
709 vfinfo->pf_qos, vf);
710 else
711 ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
712 adapter->default_up, vf);
713
714 if (vfinfo->spoofchk_enabled)
715 hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
716 }
717
718 /* reset multicast table array for vf */
719 adapter->vfinfo[vf].num_vf_mc_hashes = 0;
720
721 /* Flush and reset the mta with the new values */
722 ixgbe_set_rx_mode(adapter->netdev);
723
724 ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
725 ixgbe_set_vf_macvlan(adapter, vf, 0, NULL);
726
727 /* reset VF api back to unknown */
728 adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
729}
730
731static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
732 int vf, unsigned char *mac_addr)
733{
Tony Nguyen6af3d0f2017-04-28 12:42:03 -0700734 s32 retval;
Emil Tantilove251ecf2017-03-28 11:27:54 -0700735
Tony Nguyen6af3d0f2017-04-28 12:42:03 -0700736 ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
737 retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
738 if (retval >= 0)
739 memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr,
740 ETH_ALEN);
741 else
742 memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN);
743
744 return retval;
Emil Tantilove251ecf2017-03-28 11:27:54 -0700745}
746
Greg Rose17367272010-01-09 02:25:48 +0000747int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
748{
Alexander Duyckc60fbb02010-11-16 19:26:54 -0800749 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
Greg Rose17367272010-01-09 02:25:48 +0000750 unsigned int vfn = (event_mask & 0x3f);
751
752 bool enable = ((event_mask & 0x10000000U) != 0);
753
Joe Perchesd458cdf2013-10-01 19:04:40 -0700754 if (enable)
755 eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);
Greg Rose17367272010-01-09 02:25:48 +0000756
757 return 0;
758}
759
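/* Program the drop-enable (QDE) setting passed in qde on every Rx queue
 * that belongs to this VF's pool.
 */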
Don Skidmore8d697e72014-11-05 04:52:09 +0000760static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
761 u32 qde)
762{
763 struct ixgbe_hw *hw = &adapter->hw;
764 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
765 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
766 int i;
767
768 for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
769 u32 reg;
770
771 /* flush previous write */
772 IXGBE_WRITE_FLUSH(hw);
773
774 /* indicate to hardware that we want to set drop enable */
Emil Tantilovd28b1942017-05-23 14:02:23 -0700775 reg = IXGBE_QDE_WRITE | qde;
Don Skidmore8d697e72014-11-05 04:52:09 +0000776 reg |= i << IXGBE_QDE_IDX_SHIFT;
777 IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
778 }
779}
780
Alexander Duyck58a02be2012-07-20 08:09:17 +0000781static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
Greg Rose17367272010-01-09 02:25:48 +0000782{
Alexander Duyck87397372014-01-15 17:38:40 -0800783 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
Greg Rose17367272010-01-09 02:25:48 +0000784 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck58a02be2012-07-20 08:09:17 +0000785 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
Emil Tantilovb08e1ed2013-07-26 07:34:54 +0000786 u32 reg, reg_offset, vf_shift;
787 u32 msgbuf[4] = {0, 0, 0, 0};
Alexander Duyck58a02be2012-07-20 08:09:17 +0000788 u8 *addr = (u8 *)(&msgbuf[1]);
Alexander Duyck87397372014-01-15 17:38:40 -0800789 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
790 int i;
Alexander Duyck58a02be2012-07-20 08:09:17 +0000791
792 e_info(probe, "VF Reset msg received from vf %d\n", vf);
793
794 /* reset the filters for the device */
795 ixgbe_vf_reset_event(adapter, vf);
796
797 /* set vf mac address */
Greg Rose35055922013-02-15 05:20:09 +0000798 if (!is_zero_ether_addr(vf_mac))
799 ixgbe_set_vf_mac(adapter, vf, vf_mac);
Greg Rose17367272010-01-09 02:25:48 +0000800
801 vf_shift = vf % 32;
802 reg_offset = vf / 32;
803
Alexander Duyck58a02be2012-07-20 08:09:17 +0000804 /* enable transmit for vf */
Greg Rose17367272010-01-09 02:25:48 +0000805 reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
Jacob Kellerb4f47a42016-04-13 16:08:22 -0700806 reg |= BIT(vf_shift);
Greg Rose17367272010-01-09 02:25:48 +0000807 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
808
Alexander Duyck87397372014-01-15 17:38:40 -0800809 /* force drop enable for all VF Rx queues */
Paul Greenwaltb03254d2018-03-08 07:26:08 -0500810 reg = IXGBE_QDE_ENABLE;
811 if (adapter->vfinfo[vf].pf_vlan)
812 reg |= IXGBE_QDE_HIDE_VLAN;
813
814 ixgbe_write_qde(adapter, vf, reg);
Alexander Duyck87397372014-01-15 17:38:40 -0800815
Alexander Duyck58a02be2012-07-20 08:09:17 +0000816 /* enable receive for vf */
Greg Rose17367272010-01-09 02:25:48 +0000817 reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
Jacob Kellerb4f47a42016-04-13 16:08:22 -0700818 reg |= BIT(vf_shift);
Alexander Duyck872844d2012-08-15 02:10:43 +0000819 /*
820 * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
821 * For more info take a look at ixgbe_set_vf_lpe
822 */
823 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
824 struct net_device *dev = adapter->netdev;
825 int pf_max_frame = dev->mtu + ETH_HLEN;
826
827#ifdef CONFIG_FCOE
828 if (dev->features & NETIF_F_FCOE_MTU)
829 pf_max_frame = max_t(int, pf_max_frame,
830 IXGBE_FCOE_JUMBO_FRAME_SIZE);
831
832#endif /* CONFIG_FCOE */
833 if (pf_max_frame > ETH_FRAME_LEN)
Jacob Kellerb4f47a42016-04-13 16:08:22 -0700834 reg &= ~BIT(vf_shift);
Alexander Duyck872844d2012-08-15 02:10:43 +0000835 }
Greg Rose17367272010-01-09 02:25:48 +0000836 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
837
Alexander Duyck58a02be2012-07-20 08:09:17 +0000838 /* enable VF mailbox for further messages */
839 adapter->vfinfo[vf].clear_to_send = true;
840
Greg Rosea985b6c32010-11-18 03:02:52 +0000841 /* Enable counting of spoofed packets in the SSVPC register */
842 reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
Jacob Kellerb4f47a42016-04-13 16:08:22 -0700843 reg |= BIT(vf_shift);
Greg Rosea985b6c32010-11-18 03:02:52 +0000844 IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
845
Alexander Duyckdbf231a2014-01-15 17:38:41 -0800846 /*
 847	 * Reset the VF's TDWBAL and TDWBAH registers
848 * which are not cleared by an FLR
849 */
850 for (i = 0; i < q_per_pool; i++) {
851 IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
852 IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
853 }
854
Alexander Duyck58a02be2012-07-20 08:09:17 +0000855 /* reply to reset with ack and vf mac address */
Greg Rose35055922013-02-15 05:20:09 +0000856 msgbuf[0] = IXGBE_VF_RESET;
857 if (!is_zero_ether_addr(vf_mac)) {
858 msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
859 memcpy(addr, vf_mac, ETH_ALEN);
860 } else {
861 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
862 dev_warn(&adapter->pdev->dev,
863 "VF %d has no MAC address assigned, you may have to assign one manually\n",
864 vf);
865 }
Alexander Duyck58a02be2012-07-20 08:09:17 +0000866
867 /*
868 * Piggyback the multicast filter type so VF can compute the
869 * correct vectors
870 */
871 msgbuf[3] = hw->mac.mc_filter_type;
872 ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
873
874 return 0;
875}
876
877static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
878 u32 *msgbuf, u32 vf)
879{
880 u8 *new_mac = ((u8 *)(&msgbuf[1]));
881
882 if (!is_valid_ether_addr(new_mac)) {
883 e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
884 return -1;
885 }
886
chas williams1d96cf92016-01-05 17:30:39 -0500887 if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
dingtianhong4012dda2013-12-30 15:40:50 +0800888 !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
Alexander Duyck58a02be2012-07-20 08:09:17 +0000889 e_warn(drv,
890 "VF %d attempted to override administratively set MAC address\n"
891 "Reload the VF driver to resume operations\n",
892 vf);
893 return -1;
894 }
895
Greg Rose3970c322012-09-25 02:25:30 +0000896 return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
Alexander Duyck58a02be2012-07-20 08:09:17 +0000897}
898
899static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
900 u32 *msgbuf, u32 vf)
901{
Alexander Duycke1d0a2a2015-11-02 17:10:19 -0800902 u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
903 u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
Alexander Duyck0efbf122017-11-22 10:57:11 -0800904 u8 tcs = adapter->hw_tcs;
Alexander Duyck58a02be2012-07-20 08:09:17 +0000905
Alexander Duyck107d3012012-10-02 00:17:03 +0000906 if (adapter->vfinfo[vf].pf_vlan || tcs) {
Alexander Duyck58a02be2012-07-20 08:09:17 +0000907 e_warn(drv,
908 "VF %d attempted to override administratively set VLAN configuration\n"
909 "Reload the VF driver to resume operations\n",
910 vf);
911 return -1;
912 }
913
Alexander Duyck4c7f35f2015-11-02 17:10:32 -0800914 /* VLAN 0 is a special case, don't allow it to be removed */
915 if (!vid && !add)
916 return 0;
917
Emil Tantilovd3dec7c2016-03-18 16:11:19 -0700918 return ixgbe_set_vf_vlan(adapter, add, vid, vf);
Alexander Duyck58a02be2012-07-20 08:09:17 +0000919}
920
921static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
922 u32 *msgbuf, u32 vf)
923{
924 u8 *new_mac = ((u8 *)(&msgbuf[1]));
925 int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
926 IXGBE_VT_MSGINFO_SHIFT;
927 int err;
928
Ken Coxa9d2d532016-11-15 13:00:37 -0600929 if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
930 index > 0) {
Alexander Duyck58a02be2012-07-20 08:09:17 +0000931 e_warn(drv,
932 "VF %d requested MACVLAN filter but is administratively denied\n",
933 vf);
934 return -1;
935 }
936
 937	/* A non-zero index indicates the VF is setting a filter */
938 if (index) {
939 if (!is_valid_ether_addr(new_mac)) {
940 e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
941 return -1;
942 }
943
944 /*
945 * If the VF is allowed to set MAC filters then turn off
946 * anti-spoofing to avoid false positives.
947 */
Emil Tantilov77f192a2016-03-18 16:11:14 -0700948 if (adapter->vfinfo[vf].spoofchk_enabled) {
949 struct ixgbe_hw *hw = &adapter->hw;
950
951 hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
Emil Tantilov581e0c72016-06-01 18:59:44 -0700952 hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
Emil Tantilov77f192a2016-03-18 16:11:14 -0700953 }
Alexander Duyck58a02be2012-07-20 08:09:17 +0000954 }
955
956 err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
957 if (err == -ENOSPC)
958 e_warn(drv,
959 "VF %d has requested a MACVLAN filter but there is no space for it\n",
960 vf);
Greg Rosea3013402012-10-30 00:40:02 +0000961
962 return err < 0;
Greg Rose17367272010-01-09 02:25:48 +0000963}
964
Alexander Duyck374c65d2012-07-20 08:09:22 +0000965static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
966 u32 *msgbuf, u32 vf)
967{
968 int api = msgbuf[1];
969
970 switch (api) {
971 case ixgbe_mbox_api_10:
Alexander Duyckbffb3bc2012-07-20 08:09:37 +0000972 case ixgbe_mbox_api_11:
Vlad Zolotarov4ce37a42015-04-01 11:24:54 +0300973 case ixgbe_mbox_api_12:
Don Skidmore07eea572016-12-15 21:18:32 -0500974 case ixgbe_mbox_api_13:
Alexander Duyck374c65d2012-07-20 08:09:22 +0000975 adapter->vfinfo[vf].vf_api = api;
976 return 0;
977 default:
978 break;
979 }
980
981 e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
982
983 return -1;
984}
985
Alexander Duyckf591cd92012-07-20 08:09:32 +0000986static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
987 u32 *msgbuf, u32 vf)
988{
989 struct net_device *dev = adapter->netdev;
990 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
991 unsigned int default_tc = 0;
Alexander Duyck0efbf122017-11-22 10:57:11 -0800992 u8 num_tcs = adapter->hw_tcs;
Alexander Duyckf591cd92012-07-20 08:09:32 +0000993
994 /* verify the PF is supporting the correct APIs */
995 switch (adapter->vfinfo[vf].vf_api) {
996 case ixgbe_mbox_api_20:
997 case ixgbe_mbox_api_11:
Vlad Zolotarov4ce37a42015-04-01 11:24:54 +0300998 case ixgbe_mbox_api_12:
Don Skidmore07eea572016-12-15 21:18:32 -0500999 case ixgbe_mbox_api_13:
Alexander Duyckf591cd92012-07-20 08:09:32 +00001000 break;
1001 default:
1002 return -1;
1003 }
1004
1005 /* only allow 1 Tx queue for bandwidth limiting */
1006 msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
1007 msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
1008
1009 /* if TCs > 1 determine which TC belongs to default user priority */
1010 if (num_tcs > 1)
1011 default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);
1012
1013 /* notify VF of need for VLAN tag stripping, and correct queue */
1014 if (num_tcs)
1015 msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
1016 else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
1017 msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
1018 else
1019 msgbuf[IXGBE_VF_TRANS_VLAN] = 0;
1020
1021 /* notify VF of default queue */
1022 msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;
1023
1024 return 0;
1025}
1026
Vlad Zolotarov4ce37a42015-04-01 11:24:54 +03001027static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
1028{
1029 u32 i, j;
1030 u32 *out_buf = &msgbuf[1];
1031 const u8 *reta = adapter->rss_indir_tbl;
1032 u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter);
1033
1034 /* Check if operation is permitted */
1035 if (!adapter->vfinfo[vf].rss_query_enabled)
1036 return -EPERM;
1037
1038 /* verify the PF is supporting the correct API */
Don Skidmore07eea572016-12-15 21:18:32 -05001039 switch (adapter->vfinfo[vf].vf_api) {
1040 case ixgbe_mbox_api_13:
1041 case ixgbe_mbox_api_12:
1042 break;
1043 default:
Vlad Zolotarov4ce37a42015-04-01 11:24:54 +03001044 return -EOPNOTSUPP;
Don Skidmore07eea572016-12-15 21:18:32 -05001045 }
Vlad Zolotarov4ce37a42015-04-01 11:24:54 +03001046
1047 /* This mailbox command is supported (required) only for 82599 and x540
1048 * VFs which support up to 4 RSS queues. Therefore we will compress the
1049 * RETA by saving only 2 bits from each entry. This way we will be able
1050 * to transfer the whole RETA in a single mailbox operation.
1051 */
1052 for (i = 0; i < reta_size / 16; i++) {
1053 out_buf[i] = 0;
1054 for (j = 0; j < 16; j++)
1055 out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j);
1056 }
1057
1058 return 0;
1059}
1060
Vlad Zolotarov3c0841a2015-03-30 21:35:27 +03001061static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
1062 u32 *msgbuf, u32 vf)
1063{
1064 u32 *rss_key = &msgbuf[1];
1065
1066 /* Check if the operation is permitted */
1067 if (!adapter->vfinfo[vf].rss_query_enabled)
1068 return -EPERM;
1069
1070 /* verify the PF is supporting the correct API */
Don Skidmore07eea572016-12-15 21:18:32 -05001071 switch (adapter->vfinfo[vf].vf_api) {
1072 case ixgbe_mbox_api_13:
1073 case ixgbe_mbox_api_12:
1074 break;
1075 default:
Vlad Zolotarov3c0841a2015-03-30 21:35:27 +03001076 return -EOPNOTSUPP;
Don Skidmore07eea572016-12-15 21:18:32 -05001077 }
Vlad Zolotarov3c0841a2015-03-30 21:35:27 +03001078
Tony Nguyen3dfbfc72017-04-13 07:26:05 -07001079 memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE);
Vlad Zolotarov3c0841a2015-03-30 21:35:27 +03001080
1081 return 0;
1082}
1083
Hiroshi Shimamoto8443c1a42015-08-28 06:59:03 +00001084static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
1085 u32 *msgbuf, u32 vf)
1086{
1087 struct ixgbe_hw *hw = &adapter->hw;
1088 int xcast_mode = msgbuf[1];
Don Skidmore07eea572016-12-15 21:18:32 -05001089 u32 vmolr, fctrl, disable, enable;
Hiroshi Shimamoto8443c1a42015-08-28 06:59:03 +00001090
1091 /* verify the PF is supporting the correct APIs */
1092 switch (adapter->vfinfo[vf].vf_api) {
1093 case ixgbe_mbox_api_12:
Don Skidmore07eea572016-12-15 21:18:32 -05001094 /* promisc introduced in 1.3 version */
1095 if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
1096 return -EOPNOTSUPP;
1097 /* Fall threw */
1098 case ixgbe_mbox_api_13:
Hiroshi Shimamoto8443c1a42015-08-28 06:59:03 +00001099 break;
1100 default:
1101 return -EOPNOTSUPP;
1102 }
1103
1104 if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI &&
1105 !adapter->vfinfo[vf].trusted) {
1106 xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
1107 }
1108
1109 if (adapter->vfinfo[vf].xcast_mode == xcast_mode)
1110 goto out;
1111
1112 switch (xcast_mode) {
1113 case IXGBEVF_XCAST_MODE_NONE:
Don Skidmore07eea572016-12-15 21:18:32 -05001114 disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
1115 IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
Hiroshi Shimamoto8443c1a42015-08-28 06:59:03 +00001116 enable = 0;
1117 break;
1118 case IXGBEVF_XCAST_MODE_MULTI:
Don Skidmore07eea572016-12-15 21:18:32 -05001119 disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
Hiroshi Shimamoto8443c1a42015-08-28 06:59:03 +00001120 enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
1121 break;
1122 case IXGBEVF_XCAST_MODE_ALLMULTI:
Don Skidmore07eea572016-12-15 21:18:32 -05001123 disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
Hiroshi Shimamoto8443c1a42015-08-28 06:59:03 +00001124 enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
1125 break;
Don Skidmore07eea572016-12-15 21:18:32 -05001126 case IXGBEVF_XCAST_MODE_PROMISC:
1127 if (hw->mac.type <= ixgbe_mac_82599EB)
1128 return -EOPNOTSUPP;
1129
1130 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1131 if (!(fctrl & IXGBE_FCTRL_UPE)) {
1132 /* VF promisc requires PF in promisc */
1133 e_warn(drv,
1134 "Enabling VF promisc requires PF in promisc\n");
1135 return -EPERM;
1136 }
1137
1138 disable = 0;
1139 enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
1140 IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
1141 break;
Hiroshi Shimamoto8443c1a42015-08-28 06:59:03 +00001142 default:
1143 return -EOPNOTSUPP;
1144 }
1145
1146 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
1147 vmolr &= ~disable;
1148 vmolr |= enable;
1149 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
1150
1151 adapter->vfinfo[vf].xcast_mode = xcast_mode;
1152
1153out:
1154 msgbuf[1] = xcast_mode;
1155
1156 return 0;
1157}
1158
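/* Read one pending mailbox message from the given VF and dispatch it to the
 * matching handler; the result is reported back to the VF as ACK or NACK
 * with the CTS bit set.
 */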
Greg Rose17367272010-01-09 02:25:48 +00001159static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
1160{
1161 u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
Emil Tantilovc0509992011-05-07 06:49:18 +00001162 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
Greg Rose17367272010-01-09 02:25:48 +00001163 struct ixgbe_hw *hw = &adapter->hw;
1164 s32 retval;
Greg Rose17367272010-01-09 02:25:48 +00001165
1166 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
1167
Alexander Duyckdcaccc82012-03-28 08:03:38 +00001168 if (retval) {
Emil Tantilov849c4542010-06-03 16:53:41 +00001169 pr_err("Error receiving message from VF\n");
Alexander Duyckdcaccc82012-03-28 08:03:38 +00001170 return retval;
1171 }
Greg Rose17367272010-01-09 02:25:48 +00001172
1173 /* this is a message we already processed, do nothing */
1174 if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
Mark Rustade90dd262014-07-22 06:51:08 +00001175 return 0;
Greg Rose17367272010-01-09 02:25:48 +00001176
Alexander Duyckdcaccc82012-03-28 08:03:38 +00001177 /* flush the ack before we write any messages back */
1178 IXGBE_WRITE_FLUSH(hw);
1179
Alexander Duyck374c65d2012-07-20 08:09:22 +00001180 if (msgbuf[0] == IXGBE_VF_RESET)
1181 return ixgbe_vf_reset_msg(adapter, vf);
1182
Greg Rose17367272010-01-09 02:25:48 +00001183 /*
1184 * until the vf completes a virtual function reset it should not be
1185 * allowed to start any configuration.
1186 */
Greg Rose17367272010-01-09 02:25:48 +00001187 if (!adapter->vfinfo[vf].clear_to_send) {
1188 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
1189 ixgbe_write_mbx(hw, msgbuf, 1, vf);
Mark Rustade90dd262014-07-22 06:51:08 +00001190 return 0;
Greg Rose17367272010-01-09 02:25:48 +00001191 }
1192
1193 switch ((msgbuf[0] & 0xFFFF)) {
1194 case IXGBE_VF_SET_MAC_ADDR:
Alexander Duyck58a02be2012-07-20 08:09:17 +00001195 retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
Greg Rose17367272010-01-09 02:25:48 +00001196 break;
1197 case IXGBE_VF_SET_MULTICAST:
Alexander Duyck58a02be2012-07-20 08:09:17 +00001198 retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
1199 break;
1200 case IXGBE_VF_SET_VLAN:
1201 retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
Greg Rose17367272010-01-09 02:25:48 +00001202 break;
1203 case IXGBE_VF_SET_LPE:
Alexander Duyck872844d2012-08-15 02:10:43 +00001204 retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
Greg Rose17367272010-01-09 02:25:48 +00001205 break;
Greg Rosea1cbb15c2011-05-13 01:33:48 +00001206 case IXGBE_VF_SET_MACVLAN:
Alexander Duyck58a02be2012-07-20 08:09:17 +00001207 retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
Greg Rosea1cbb15c2011-05-13 01:33:48 +00001208 break;
Alexander Duyck374c65d2012-07-20 08:09:22 +00001209 case IXGBE_VF_API_NEGOTIATE:
1210 retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
1211 break;
Alexander Duyckf591cd92012-07-20 08:09:32 +00001212 case IXGBE_VF_GET_QUEUES:
1213 retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
1214 break;
Vlad Zolotarov4ce37a42015-04-01 11:24:54 +03001215 case IXGBE_VF_GET_RETA:
1216 retval = ixgbe_get_vf_reta(adapter, msgbuf, vf);
1217 break;
Vlad Zolotarov3c0841a2015-03-30 21:35:27 +03001218 case IXGBE_VF_GET_RSS_KEY:
1219 retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf);
1220 break;
Hiroshi Shimamoto8443c1a42015-08-28 06:59:03 +00001221 case IXGBE_VF_UPDATE_XCAST_MODE:
1222 retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
1223 break;
Greg Rose17367272010-01-09 02:25:48 +00001224 default:
Emil Tantilov396e7992010-07-01 20:05:12 +00001225 e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
Greg Rose17367272010-01-09 02:25:48 +00001226 retval = IXGBE_ERR_MBX;
1227 break;
1228 }
1229
1230 /* notify the VF of the results of what it sent us */
1231 if (retval)
1232 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
1233 else
1234 msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
1235
1236 msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
1237
Alexander Duyck374c65d2012-07-20 08:09:22 +00001238 ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);
Greg Rose17367272010-01-09 02:25:48 +00001239
1240 return retval;
1241}
1242
1243static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
1244{
1245 struct ixgbe_hw *hw = &adapter->hw;
1246 u32 msg = IXGBE_VT_MSGTYPE_NACK;
1247
1248 /* if device isn't clear to send it shouldn't be reading either */
1249 if (!adapter->vfinfo[vf].clear_to_send)
1250 ixgbe_write_mbx(hw, &msg, 1, vf);
1251}
1252
1253void ixgbe_msg_task(struct ixgbe_adapter *adapter)
1254{
1255 struct ixgbe_hw *hw = &adapter->hw;
1256 u32 vf;
1257
1258 for (vf = 0; vf < adapter->num_vfs; vf++) {
1259 /* process any reset requests */
1260 if (!ixgbe_check_for_rst(hw, vf))
1261 ixgbe_vf_reset_event(adapter, vf);
1262
1263 /* process any messages pending */
1264 if (!ixgbe_check_for_msg(hw, vf))
1265 ixgbe_rcv_msg_from_vf(adapter, vf);
1266
1267 /* process any acks */
1268 if (!ixgbe_check_for_ack(hw, vf))
1269 ixgbe_rcv_ack_from_vf(adapter, vf);
1270 }
1271}
1272
Greg Rose767081a2010-01-22 22:46:40 +00001273void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
1274{
1275 struct ixgbe_hw *hw = &adapter->hw;
1276
1277 /* disable transmit and receive for all vfs */
1278 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
1279 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
1280
1281 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
1282 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
1283}
1284
Hiroshi Shimamoto54011e42015-08-28 06:58:33 +00001285static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
1286{
1287 struct ixgbe_hw *hw = &adapter->hw;
1288 u32 ping;
1289
1290 ping = IXGBE_PF_CONTROL_MSG;
1291 if (adapter->vfinfo[vf].clear_to_send)
1292 ping |= IXGBE_VT_MSGTYPE_CTS;
1293 ixgbe_write_mbx(hw, &ping, 1, vf);
1294}
1295
Greg Rose767081a2010-01-22 22:46:40 +00001296void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
1297{
1298 struct ixgbe_hw *hw = &adapter->hw;
1299 u32 ping;
1300 int i;
1301
1302 for (i = 0 ; i < adapter->num_vfs; i++) {
1303 ping = IXGBE_PF_CONTROL_MSG;
1304 if (adapter->vfinfo[i].clear_to_send)
1305 ping |= IXGBE_VT_MSGTYPE_CTS;
1306 ixgbe_write_mbx(hw, &ping, 1, i);
1307 }
1308}
1309
Greg Rose7f016482010-05-04 22:12:06 +00001310int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1311{
1312 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Tony Nguyen6af3d0f2017-04-28 12:42:03 -07001313 s32 retval;
Tony Nguyen27bdc442017-04-12 13:35:22 -07001314
1315 if (vf >= adapter->num_vfs)
Greg Rose7f016482010-05-04 22:12:06 +00001316 return -EINVAL;
Tony Nguyen27bdc442017-04-12 13:35:22 -07001317
Tony Nguyen6af3d0f2017-04-28 12:42:03 -07001318 if (is_valid_ether_addr(mac)) {
Tony Nguyen27bdc442017-04-12 13:35:22 -07001319 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
1320 mac, vf);
 1321		dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective.\n");
Tony Nguyen6af3d0f2017-04-28 12:42:03 -07001322
1323 retval = ixgbe_set_vf_mac(adapter, vf, mac);
1324 if (retval >= 0) {
1325 adapter->vfinfo[vf].pf_set_mac = true;
1326
1327 if (test_bit(__IXGBE_DOWN, &adapter->state)) {
1328 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n");
1329 dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n");
1330 }
1331 } else {
1332 dev_warn(&adapter->pdev->dev, "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n");
1333 }
1334 } else if (is_zero_ether_addr(mac)) {
1335 unsigned char *vf_mac_addr =
1336 adapter->vfinfo[vf].vf_mac_addresses;
1337
1338 /* nothing to do */
1339 if (is_zero_ether_addr(vf_mac_addr))
1340 return 0;
1341
1342 dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf);
1343
1344 retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf);
1345 if (retval >= 0) {
1346 adapter->vfinfo[vf].pf_set_mac = false;
1347 memcpy(vf_mac_addr, mac, ETH_ALEN);
1348 } else {
1349 dev_warn(&adapter->pdev->dev, "Could NOT remove the VF MAC address.\n");
Tony Nguyen27bdc442017-04-12 13:35:22 -07001350 }
1351 } else {
Tony Nguyen6af3d0f2017-04-28 12:42:03 -07001352 retval = -EINVAL;
Greg Rose7f016482010-05-04 22:12:06 +00001353 }
Tony Nguyen27bdc442017-04-12 13:35:22 -07001354
Tony Nguyen6af3d0f2017-04-28 12:42:03 -07001355 return retval;
Greg Rose7f016482010-05-04 22:12:06 +00001356}
1357
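/* Assign a port VLAN and QoS priority to a VF: add the VLAN filter,
 * revoke tagless access via VLAN 0, program default VLAN tag insertion
 * (VMVIR), and on X550 and newer hide the tag from the VF via QDE.
 */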
Don Skidmore2b509c02014-11-01 01:06:57 +00001358static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
1359 u16 vlan, u8 qos)
1360{
1361 struct ixgbe_hw *hw = &adapter->hw;
Emil Tantilov42ce2c82014-12-10 05:28:51 +00001362 int err;
Don Skidmore2b509c02014-11-01 01:06:57 +00001363
Emil Tantilov42ce2c82014-12-10 05:28:51 +00001364 err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
Don Skidmore2b509c02014-11-01 01:06:57 +00001365 if (err)
1366 goto out;
Emil Tantilov42ce2c82014-12-10 05:28:51 +00001367
Alexander Duyck4c7f35f2015-11-02 17:10:32 -08001368 /* Revoke tagless access via VLAN 0 */
1369 ixgbe_set_vf_vlan(adapter, false, 0, vf);
1370
Don Skidmore2b509c02014-11-01 01:06:57 +00001371 ixgbe_set_vmvir(adapter, vlan, qos, vf);
1372 ixgbe_set_vmolr(hw, vf, false);
Don Skidmore9a75a1a2014-11-07 03:53:35 +00001373
1374	/* enable hide VLAN on X550 and newer */
1375 if (hw->mac.type >= ixgbe_mac_X550)
1376 ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE |
1377 IXGBE_QDE_HIDE_VLAN);
1378
Don Skidmore2b509c02014-11-01 01:06:57 +00001379 adapter->vfinfo[vf].pf_vlan = vlan;
1380 adapter->vfinfo[vf].pf_qos = qos;
1381 dev_info(&adapter->pdev->dev,
1382 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
1383 if (test_bit(__IXGBE_DOWN, &adapter->state)) {
1384 dev_warn(&adapter->pdev->dev,
1385 "The VF VLAN has been set, but the PF device is not up.\n");
1386 dev_warn(&adapter->pdev->dev,
1387 "Bring the PF device up before attempting to use the VF device.\n");
1388 }
1389
1390out:
1391 return err;
1392}
1393
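/* Undo a port VLAN assignment: drop the VLAN filter, restore tagless
 * access via VLAN 0, clear default tag insertion, and stop hiding the
 * VLAN tag on X550 and newer.
 */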
1394static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
1395{
1396 struct ixgbe_hw *hw = &adapter->hw;
1397 int err;
1398
1399 err = ixgbe_set_vf_vlan(adapter, false,
1400 adapter->vfinfo[vf].pf_vlan, vf);
Alexander Duyck4c7f35f2015-11-02 17:10:32 -08001401 /* Restore tagless access via VLAN 0 */
1402 ixgbe_set_vf_vlan(adapter, true, 0, vf);
Don Skidmore2b509c02014-11-01 01:06:57 +00001403 ixgbe_clear_vmvir(adapter, vf);
1404 ixgbe_set_vmolr(hw, vf, true);
Emil Tantilov42ce2c82014-12-10 05:28:51 +00001405
1406 /* disable hide VLAN on X550 */
1407 if (hw->mac.type >= ixgbe_mac_X550)
1408 ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);
1409
Don Skidmore2b509c02014-11-01 01:06:57 +00001410 adapter->vfinfo[vf].pf_vlan = 0;
1411 adapter->vfinfo[vf].pf_qos = 0;
1412
1413 return err;
1414}
1415
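/* .ndo_set_vf_vlan handler - configure or remove a port VLAN for a VF.
 * Only 802.1Q tags are supported; a vlan/qos of 0 removes any existing
 * port VLAN, and an existing port VLAN is torn down before a new one is
 * applied.
 */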
Moshe Shemesh79aab092016-09-22 12:11:15 +03001416int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1417 u8 qos, __be16 vlan_proto)
Greg Rose7f016482010-05-04 22:12:06 +00001418{
1419 int err = 0;
1420 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1421
1422 if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
1423 return -EINVAL;
Moshe Shemesh79aab092016-09-22 12:11:15 +03001424 if (vlan_proto != htons(ETH_P_8021Q))
1425 return -EPROTONOSUPPORT;
Greg Rose7f016482010-05-04 22:12:06 +00001426 if (vlan || qos) {
Don Skidmore2b509c02014-11-01 01:06:57 +00001427 /* Check if there is already a port VLAN set, if so
1428 * we have to delete the old one first before we
1429 * can set the new one. The usage model had
1430 * previously assumed the user would delete the
1431 * old port VLAN before setting a new one but this
1432 * is not necessarily the case.
1433 */
Greg Rose026ac672013-04-17 20:41:35 +00001434 if (adapter->vfinfo[vf].pf_vlan)
Don Skidmore2b509c02014-11-01 01:06:57 +00001435 err = ixgbe_disable_port_vlan(adapter, vf);
Greg Rose026ac672013-04-17 20:41:35 +00001436 if (err)
1437 goto out;
Don Skidmore2b509c02014-11-01 01:06:57 +00001438 err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos);
Greg Rose7f016482010-05-04 22:12:06 +00001439 } else {
Don Skidmore2b509c02014-11-01 01:06:57 +00001440 err = ixgbe_disable_port_vlan(adapter, vf);
Jacob Kellere7cf7452014-04-09 06:03:10 +00001441 }
Don Skidmore2b509c02014-11-01 01:06:57 +00001442
Greg Rose7f016482010-05-04 22:12:06 +00001443out:
Jacob Kellere7cf7452014-04-09 06:03:10 +00001444 return err;
Greg Rose7f016482010-05-04 22:12:06 +00001445}
1446
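/* Translate the adapter's negotiated link speed flag into Mb/s
 * (0 if the speed is unknown).
 */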
Rostislav Pehlivanovc04f90e2016-01-27 18:33:30 +00001447int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
Lior Levyff4ab202011-03-11 02:03:07 +00001448{
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001449 switch (adapter->link_speed) {
Lior Levyff4ab202011-03-11 02:03:07 +00001450 case IXGBE_LINK_SPEED_100_FULL:
1451 return 100;
1452 case IXGBE_LINK_SPEED_1GB_FULL:
1453 return 1000;
1454 case IXGBE_LINK_SPEED_10GB_FULL:
1455 return 10000;
1456 default:
1457 return 0;
1458 }
1459}
1460
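/* Program the per-queue Tx rate limiter (RTTBCNRC) for every queue in
 * the VF's pool.  The register takes link_speed/tx_rate as a fixed-point
 * rate factor: for example, a 10000 Mb/s link capped at 2000 Mb/s yields
 * a factor of 5.0.  A tx_rate of 0 clears the limiter.
 */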
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001461static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
Lior Levyff4ab202011-03-11 02:03:07 +00001462{
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001463 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
1464 struct ixgbe_hw *hw = &adapter->hw;
1465 u32 bcnrc_val = 0;
1466 u16 queue, queues_per_pool;
1467 u16 tx_rate = adapter->vfinfo[vf].tx_rate;
Lior Levyff4ab202011-03-11 02:03:07 +00001468
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001469 if (tx_rate) {
1470 /* start with base link speed value */
1471 bcnrc_val = adapter->vf_rate_link_speed;
1472
Lior Levyff4ab202011-03-11 02:03:07 +00001473 /* Calculate the rate factor values to set */
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001474 bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
1475 bcnrc_val /= tx_rate;
Lior Levyff4ab202011-03-11 02:03:07 +00001476
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001477 /* clear everything but the rate factor */
1478 bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
1479 IXGBE_RTTBCNRC_RF_DEC_MASK;
1480
1481 /* enable the rate scheduler */
1482 bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
Lior Levyff4ab202011-03-11 02:03:07 +00001483 }
1484
Lior Levy7555e832011-06-25 00:09:08 -07001485 /*
1486 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
1487 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
1488 * and 0x004 otherwise.
1489 */
1490 switch (hw->mac.type) {
1491 case ixgbe_mac_82599EB:
1492 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
1493 break;
1494 case ixgbe_mac_X540:
1495 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
1496 break;
1497 default:
1498 break;
1499 }
1500
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001501 /* determine how many queues per pool based on VMDq mask */
1502 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
1503
1504 /* write value for all Tx queues belonging to VF */
1505 for (queue = 0; queue < queues_per_pool; queue++) {
1506 unsigned int reg_idx = (vf * queues_per_pool) + queue;
1507
1508 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
1509 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
1510 }
Lior Levyff4ab202011-03-11 02:03:07 +00001511}
1512
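/* Re-validate VF Tx rate limits after a link change: if the link speed
 * no longer matches the speed at which the limits were set, drop the
 * stored limits, then reprogram the rate limiter for every VF.
 */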
1513void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
1514{
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001515 int i;
Lior Levyff4ab202011-03-11 02:03:07 +00001516
1517 /* VF Tx rate limit was not set */
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001518 if (!adapter->vf_rate_link_speed)
Lior Levyff4ab202011-03-11 02:03:07 +00001519 return;
1520
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001521 if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
Lior Levyff4ab202011-03-11 02:03:07 +00001522 adapter->vf_rate_link_speed = 0;
1523 dev_info(&adapter->pdev->dev,
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001524 "Link speed has been changed. VF Transmit rate is disabled\n");
Lior Levyff4ab202011-03-11 02:03:07 +00001525 }
1526
1527 for (i = 0; i < adapter->num_vfs; i++) {
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001528 if (!adapter->vf_rate_link_speed)
Lior Levyff4ab202011-03-11 02:03:07 +00001529 adapter->vfinfo[i].tx_rate = 0;
1530
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001531 ixgbe_set_vf_rate_limit(adapter, i);
Lior Levyff4ab202011-03-11 02:03:07 +00001532 }
1533}
1534
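/* .ndo_set_vf_rate handler - set the maximum Tx rate for a VF.  Requires
 * an active 10 Gb/s link; a minimum rate is not supported, and
 * max_tx_rate must be 0 (no limit) or between 11 and the link speed
 * in Mb/s.
 */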
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001535int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
1536 int max_tx_rate)
Greg Rose7f016482010-05-04 22:12:06 +00001537{
Lior Levyff4ab202011-03-11 02:03:07 +00001538 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001539 int link_speed;
Lior Levyff4ab202011-03-11 02:03:07 +00001540
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001541 /* verify VF is active */
1542 if (vf >= adapter->num_vfs)
Lior Levyff4ab202011-03-11 02:03:07 +00001543 return -EINVAL;
1544
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001545 /* verify link is up */
1546 if (!adapter->link_up)
1547 return -EINVAL;
1548
1549 /* verify we are linked at 10Gbps */
1550 link_speed = ixgbe_link_mbps(adapter);
1551 if (link_speed != 10000)
1552 return -EINVAL;
1553
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001554 if (min_tx_rate)
1555 return -EINVAL;
1556
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001557	/* rate limit cannot be less than 10 Mbps or greater than link speed */
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001558 if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001559 return -EINVAL;
1560
1561 /* store values */
1562 adapter->vf_rate_link_speed = link_speed;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001563 adapter->vfinfo[vf].tx_rate = max_tx_rate;
Alexander Duyck9f66d3e2012-07-20 08:09:06 +00001564
1565 /* update hardware configuration */
1566 ixgbe_set_vf_rate_limit(adapter, vf);
Lior Levyff4ab202011-03-11 02:03:07 +00001567
1568 return 0;
Greg Rose7f016482010-05-04 22:12:06 +00001569}
1570
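/* .ndo_set_vf_spoofchk handler - enable or disable anti-spoof checking
 * for a VF.  MAC and VLAN anti-spoofing are always updated; EtherType
 * anti-spoofing for LLDP and pause frames is updated on MACs that
 * support it.
 */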
Greg Rosede4c7f62011-09-29 05:57:33 +00001571int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
1572{
1573 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Greg Rosede4c7f62011-09-29 05:57:33 +00001574 struct ixgbe_hw *hw = &adapter->hw;
Greg Rosede4c7f62011-09-29 05:57:33 +00001575
Emil Tantilov600a5072014-10-16 15:49:02 +00001576 if (vf >= adapter->num_vfs)
1577 return -EINVAL;
1578
Greg Rosede4c7f62011-09-29 05:57:33 +00001579 adapter->vfinfo[vf].spoofchk_enabled = setting;
1580
Emil Tantilov77f192a2016-03-18 16:11:14 -07001581 /* configure MAC spoofing */
1582 hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);
Greg Rosede4c7f62011-09-29 05:57:33 +00001583
Emil Tantilov77f192a2016-03-18 16:11:14 -07001584 /* configure VLAN spoofing */
Emil Tantilovd3dec7c2016-03-18 16:11:19 -07001585 hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);
Emil Tantilov77f192a2016-03-18 16:11:14 -07001586
1587 /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
1588 * calling set_ethertype_anti_spoofing for each VF in loop below
1589 */
1590 if (hw->mac.ops.set_ethertype_anti_spoofing) {
1591 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
1592 (IXGBE_ETQF_FILTER_EN |
1593 IXGBE_ETQF_TX_ANTISPOOF |
1594 IXGBE_ETH_P_LLDP));
1595
1596 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
1597 (IXGBE_ETQF_FILTER_EN |
1598 IXGBE_ETQF_TX_ANTISPOOF |
1599 ETH_P_PAUSE));
1600
1601 hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
Greg Rosede4c7f62011-09-29 05:57:33 +00001602 }
1603
1604 return 0;
1605}
1606
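/* .ndo_set_vf_rss_query_en handler - allow or disallow a VF to query its
 * RSS configuration over the mailbox (82599 and X540 only).
 */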
Vlad Zolotarove65ce0d2015-03-30 21:35:24 +03001607int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
1608 bool setting)
1609{
1610 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1611
1612	/* This operation is currently supported only for 82599 and X540
1613 * devices.
1614 */
1615 if (adapter->hw.mac.type < ixgbe_mac_82599EB ||
1616 adapter->hw.mac.type >= ixgbe_mac_X550)
1617 return -EOPNOTSUPP;
1618
1619 if (vf >= adapter->num_vfs)
1620 return -EINVAL;
1621
1622 adapter->vfinfo[vf].rss_query_enabled = setting;
1623
1624 return 0;
1625}
1626
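/* .ndo_set_vf_trust handler - mark a VF as trusted or untrusted.  On a
 * change the VF is pinged with clear_to_send dropped so it resets and
 * renegotiates its features.
 */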
Hiroshi Shimamoto54011e42015-08-28 06:58:33 +00001627int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
1628{
1629 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1630
1631 if (vf >= adapter->num_vfs)
1632 return -EINVAL;
1633
1634 /* nothing to do */
1635 if (adapter->vfinfo[vf].trusted == setting)
1636 return 0;
1637
1638 adapter->vfinfo[vf].trusted = setting;
1639
1640 /* reset VF to reconfigure features */
1641 adapter->vfinfo[vf].clear_to_send = false;
1642 ixgbe_ping_vf(adapter, vf);
1643
1644 e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");
1645
1646 return 0;
1647}
1648
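/* .ndo_get_vf_config handler - report the VF's MAC address, rate limits,
 * port VLAN/QoS, and spoof-check, RSS-query, and trust settings.
 */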
Greg Rose7f016482010-05-04 22:12:06 +00001649int ixgbe_ndo_get_vf_config(struct net_device *netdev,
1650 int vf, struct ifla_vf_info *ivi)
1651{
1652 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1653 if (vf >= adapter->num_vfs)
1654 return -EINVAL;
1655 ivi->vf = vf;
1656 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001657 ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
1658 ivi->min_tx_rate = 0;
Greg Rose7f016482010-05-04 22:12:06 +00001659 ivi->vlan = adapter->vfinfo[vf].pf_vlan;
1660 ivi->qos = adapter->vfinfo[vf].pf_qos;
Greg Rosede4c7f62011-09-29 05:57:33 +00001661 ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
Vlad Zolotarove65ce0d2015-03-30 21:35:24 +03001662 ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
Hiroshi Shimamoto54011e42015-08-28 06:58:33 +00001663 ivi->trusted = adapter->vfinfo[vf].trusted;
Greg Rose7f016482010-05-04 22:12:06 +00001664 return 0;
1665}