// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
#ifdef NETIF_F_HW_VLAN_CTAG_TX
#include <linux/if_vlan.h>
#endif

#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_PCI_IOV
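/**
 * ixgbe_alloc_vf_macvlans - allocate the spare MACVLAN entries for VFs
 * @adapter: board private structure
 * @num_vfs: number of VFs being enabled
 *
 * Hands out whatever RAR entries remain after the PF and VF MAC filters
 * are accounted for as a free list of MACVLAN filters that VFs may claim
 * at runtime.  Allocation failure is not fatal; it only means later VF
 * MACVLAN requests will be denied.
 */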
static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
					   unsigned int num_vfs)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct vf_macvlans *mv_list;
	int num_vf_macvlans, i;

	num_vf_macvlans = hw->mac.num_rar_entries -
			  (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
	if (!num_vf_macvlans)
		return;

	mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
			  GFP_KERNEL);
	if (mv_list) {
		/* Initialize list of VF macvlans */
		INIT_LIST_HEAD(&adapter->vf_mvs.l);
		for (i = 0; i < num_vf_macvlans; i++) {
			mv_list[i].vf = -1;
			mv_list[i].free = true;
			list_add(&mv_list[i].l, &adapter->vf_mvs.l);
		}
		adapter->mv_list = mv_list;
	}
}

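/**
 * __ixgbe_enable_sriov - common SR-IOV bring-up for both enable paths
 * @adapter: board private structure
 * @num_vfs: number of VFs to configure
 *
 * Allocates the per-VF bookkeeping, switches the device into VMDq/SR-IOV
 * mode, and initializes each VF to safe defaults: spoof checking on, RSS
 * querying off, untrusted, and no multicast/unicast promiscuous mode.
 */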
static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
				unsigned int num_vfs)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (adapter->xdp_prog) {
		e_warn(probe, "SRIOV is not supported with XDP\n");
		return -EINVAL;
	}

	/* Enable VMDq flag so device will be set in VM mode */
	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
			  IXGBE_FLAG_VMDQ_ENABLED;

	/* Allocate memory for per VF control structures */
	adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
				  GFP_KERNEL);
	if (!adapter->vfinfo)
		return -ENOMEM;

	adapter->num_vfs = num_vfs;

	ixgbe_alloc_vf_macvlans(adapter, num_vfs);
	adapter->ring_feature[RING_F_VMDQ].offset = num_vfs;

	/* Initialize default switching mode VEB */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
	adapter->bridge_mode = BRIDGE_MODE_VEB;

	/* limit traffic classes based on VFs enabled */
	if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
		adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
		adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
	} else if (num_vfs < 32) {
		adapter->dcb_cfg.num_tcs.pg_tcs = 4;
		adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
	} else {
		adapter->dcb_cfg.num_tcs.pg_tcs = 1;
		adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
	}

	/* Disable RSC when in SR-IOV mode */
	adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
			     IXGBE_FLAG2_RSC_ENABLED);

	for (i = 0; i < num_vfs; i++) {
		/* enable spoof checking for all VFs */
		adapter->vfinfo[i].spoofchk_enabled = true;

		/* We support VF RSS querying only for 82599 and x540
		 * devices at the moment. These devices share RSS
		 * indirection table and RSS hash key with PF therefore
		 * we want to disable the querying by default.
		 */
		adapter->vfinfo[i].rss_query_enabled = 0;

		/* Untrust all VFs */
		adapter->vfinfo[i].trusted = false;

		/* set the default xcast mode */
		adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
	}

	e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs);
	return 0;
}

/**
 * ixgbe_get_vfs - Find and take references to all vf devices
 * @adapter: Pointer to adapter struct
 */
static void ixgbe_get_vfs(struct ixgbe_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 vendor = pdev->vendor;
	struct pci_dev *vfdev;
	int vf = 0;
	u16 vf_id;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);

	vfdev = pci_get_device(vendor, vf_id, NULL);
	for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) {
		if (!vfdev->is_virtfn)
			continue;
		if (vfdev->physfn != pdev)
			continue;
		if (vf >= adapter->num_vfs)
			continue;
		pci_dev_get(vfdev);
		adapter->vfinfo[vf].vfdev = vfdev;
		++vf;
	}
}

/* Note this function is called when the user wants to enable SR-IOV
 * VFs using the now deprecated module parameter
 */
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
{
	int pre_existing_vfs = 0;
	unsigned int num_vfs;

	pre_existing_vfs = pci_num_vf(adapter->pdev);
	if (!pre_existing_vfs && !max_vfs)
		return;

	/* If there are pre-existing VFs then we have to force
	 * use of that many - override any module parameter value.
	 * This may result from the user unloading the PF driver
	 * while VFs were assigned to guest VMs or because the VFs
	 * have been created via the new PCI SR-IOV sysfs interface.
	 */
	if (pre_existing_vfs) {
		num_vfs = pre_existing_vfs;
		dev_warn(&adapter->pdev->dev,
			 "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
	} else {
		int err;
		/*
		 * The 82599 supports up to 64 VFs per physical function
		 * but this implementation limits allocation to 63 so that
		 * basic networking resources are still available to the
		 * physical function. Requests for more than 63 VFs are
		 * therefore clamped to 63.
		 */
		num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT);

		err = pci_enable_sriov(adapter->pdev, num_vfs);
		if (err) {
			e_err(probe, "Failed to enable PCI sriov: %d\n", err);
			return;
		}
	}

	if (!__ixgbe_enable_sriov(adapter, num_vfs)) {
		ixgbe_get_vfs(adapter);
		return;
	}

	/* If we have gotten to this point then there is no memory available
	 * to manage the VF devices - print message and bail.
	 */
	e_err(probe, "Unable to allocate memory for VF Data Storage - SRIOV disabled\n");
	ixgbe_disable_sriov(adapter);
}

#endif /* #ifdef CONFIG_PCI_IOV */
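/**
 * ixgbe_disable_sriov - release VF resources and turn off SR-IOV
 * @adapter: board private structure
 *
 * Drops the references taken on the VF pci_dev structures, frees the
 * per-VF state, and disables SR-IOV in hardware.  If VFs are still
 * assigned to guests the PCI-level disable is skipped and -EPERM is
 * returned so the VMs are not broken out from under their VFs.
 */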
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
	unsigned int num_vfs = adapter->num_vfs, vf;
	int rss;

	/* set num VFs to 0 to prevent access to vfinfo */
	adapter->num_vfs = 0;

	/* put the reference to all of the vf devices */
	for (vf = 0; vf < num_vfs; ++vf) {
		struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;

		if (!vfdev)
			continue;
		adapter->vfinfo[vf].vfdev = NULL;
		pci_dev_put(vfdev);
	}

	/* free VF control structures */
	kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;

	/* free macvlan list */
	kfree(adapter->mv_list);
	adapter->mv_list = NULL;

	/* if SR-IOV is already disabled then there is nothing to do */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return 0;

#ifdef CONFIG_PCI_IOV
	/*
	 * If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(adapter->pdev)) {
		e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
		return -EPERM;
	}
	/* disable iov and allow time for transactions to clear */
	pci_disable_sriov(adapter->pdev);
#endif

	/* Disable VMDq flag so device will be set in VM mode */
	if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) {
		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
		adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
		rss = min_t(int, ixgbe_max_rss_indices(adapter),
			    num_online_cpus());
	} else {
		rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
	}

	adapter->ring_feature[RING_F_VMDQ].offset = 0;
	adapter->ring_feature[RING_F_RSS].limit = rss;

	/* take a breather then clean up driver data */
	msleep(100);
	return 0;
}

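/**
 * ixgbe_pci_sriov_enable - enable VFs via the sysfs sriov_numvfs interface
 * @dev: PCI device the request arrived on
 * @num_vfs: number of VFs requested
 *
 * Validates the request against the traffic-class and macvlan-offload
 * limits, brings up the software state, resets the adapter, and finally
 * enables SR-IOV at the PCI level.  Returns the number of VFs enabled on
 * success or a negative errno on failure.
 */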
static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
	int pre_existing_vfs = pci_num_vf(dev);
	int err = 0, num_rx_pools, i, limit;
	u8 num_tc;

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		err = ixgbe_disable_sriov(adapter);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return num_vfs;

	if (err)
		return err;

	/* While the SR-IOV capability structure reports total VFs to be 64,
	 * we limit the actual number allocated as below based on two factors.
	 *    Num_TCs	MAX_VFs
	 *     1	  63
	 *     <=4	  31
	 *     >4	  15
	 * First, we reserve some transmit/receive resources for the PF.
	 * Second, VMDQ also uses the same pools that SR-IOV does. We need to
	 * account for this, so that we don't accidentally allocate more VFs
	 * than we have available pools. The PCI bus driver already checks for
	 * other values out of range.
	 */
	num_tc = adapter->hw_tcs;
	num_rx_pools = bitmap_weight(adapter->fwd_bitmask,
				     adapter->num_rx_pools);
	limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
		(num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;

	if (num_vfs > (limit - num_rx_pools)) {
		e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n",
			  num_tc, num_rx_pools - 1, limit - num_rx_pools);
		return -EPERM;
	}

	err = __ixgbe_enable_sriov(adapter, num_vfs);
	if (err)
		return err;

	for (i = 0; i < num_vfs; i++)
		ixgbe_vf_configuration(dev, (i | 0x10000000));

	/* reset before enabling SRIOV to avoid mailbox issues */
	ixgbe_sriov_reinit(adapter);

	err = pci_enable_sriov(dev, num_vfs);
	if (err) {
		e_dev_warn("Failed to enable PCI sriov: %d\n", err);
		return err;
	}
	ixgbe_get_vfs(adapter);

	return num_vfs;
#else
	return 0;
#endif
}

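/**
 * ixgbe_pci_sriov_disable - disable VFs via the sysfs sriov_numvfs interface
 * @dev: PCI device the request arrived on
 *
 * Tears down SR-IOV and reinitializes the adapter queueing layout if the
 * disable actually changed state.
 */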
static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
	int err;
#ifdef CONFIG_PCI_IOV
	u32 current_flags = adapter->flags;
	int prev_num_vf = pci_num_vf(dev);
#endif

	err = ixgbe_disable_sriov(adapter);

	/* Only reinit if no error and state changed */
#ifdef CONFIG_PCI_IOV
	if (!err && (current_flags != adapter->flags ||
		     prev_num_vf != pci_num_vf(dev)))
		ixgbe_sriov_reinit(adapter);
#endif

	return err;
}

int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	if (num_vfs == 0)
		return ixgbe_pci_sriov_disable(dev);
	else
		return ixgbe_pci_sriov_enable(dev, num_vfs);
}

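/**
 * ixgbe_set_vf_multicasts - program a VF's multicast hash list
 * @adapter: board private structure
 * @msgbuf: mailbox message from the VF (entry count plus 12-bit hashes)
 * @vf: the VF index the message came from
 *
 * Stores the hashes so they can be replayed after a PF multicast update,
 * sets the corresponding bits in the shared MTA, and enables multicast
 * promiscuous-by-hash (ROMPE) for the VF's pool.
 */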
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
				   u32 *msgbuf, u32 vf)
{
	int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
		      >> IXGBE_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
	struct ixgbe_hw *hw = &adapter->hw;
	int i;
	u32 vector_bit;
	u32 vector_reg;
	u32 mta_reg;
	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	/* only so many hash values supported */
	entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);

	/*
	 * salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vfinfo->num_vf_mc_hashes = entries;

	/*
	 * VFs are limited to using the MTA hash table for their multicast
	 * addresses
	 */
	for (i = 0; i < entries; i++) {
		vfinfo->vf_mc_hashes[i] = hash_list[i];
	}

	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
		vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
		vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
		mta_reg |= BIT(vector_bit);
		IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
	}
	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}

#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct vf_data_storage *vfinfo;
	int i, j;
	u32 vector_bit;
	u32 vector_reg;
	u32 mta_reg;

	for (i = 0; i < adapter->num_vfs; i++) {
		u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
		vfinfo = &adapter->vfinfo[i];
		for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
			hw->addr_ctrl.mta_in_use++;
			vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
			vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
			mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
			mta_reg |= BIT(vector_bit);
			IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
		}

		if (vfinfo->num_vf_mc_hashes)
			vmolr |= IXGBE_VMOLR_ROMPE;
		else
			vmolr &= ~IXGBE_VMOLR_ROMPE;
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
	}

	/* Restore any VF macvlans */
	ixgbe_full_sync_mac_table(adapter);
}
#endif

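/**
 * ixgbe_set_vf_vlan - add or remove a VLAN filter on behalf of a VF
 * @adapter: board private structure
 * @add: non-zero to add the filter, zero to remove it
 * @vid: VLAN ID being added or removed
 * @vf: pool/VF index the filter applies to
 */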
static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
			     u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* If the VLAN overlaps with one the PF is currently monitoring,
	 * make sure that we are able to allocate a VLVF entry. This may
	 * be redundant but it guarantees PF will maintain visibility to
	 * the VLAN.
	 */
	if (add && test_bit(vid, adapter->active_vlans)) {
		err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false);
		if (err)
			return err;
	}

	err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false);

	if (add && !err)
		return err;

	/* If we failed to add the VF VLAN or we are removing the VF VLAN
	 * we may need to drop the PF pool bit in order to allow us to free
	 * up the VLVF resources.
	 */
	if (test_bit(vid, adapter->active_vlans) ||
	    (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
		ixgbe_update_pf_promisc_vlvf(adapter, vid);

	return err;
}

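/**
 * ixgbe_set_vf_lpe - handle a VF's request to change its max frame size
 * @adapter: board private structure
 * @msgbuf: mailbox message; msgbuf[1] carries the requested max frame
 * @vf: the VF index the message came from
 *
 * On 82599 a legacy (mailbox API 1.0) VF cannot safely receive jumbo
 * frames, so the VF's receive path is disabled rather than allowing a
 * mixed jumbo/non-jumbo configuration.  Otherwise the global MAXFRS is
 * raised if the request exceeds the current hardware setting.
 */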
static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = msgbuf[1];
	u32 max_frs;

	/*
	 * For 82599EB we have to keep all PFs and VFs operating with
	 * the same max_frame value in order to avoid sending an oversize
	 * frame to a VF. In order to guarantee this is handled correctly
	 * for all cases we have several special exceptions to take into
	 * account before we can enable the VF for receive
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		struct net_device *dev = adapter->netdev;
		int pf_max_frame = dev->mtu + ETH_HLEN;
		u32 reg_offset, vf_shift, vfre;
		s32 err = 0;

#ifdef CONFIG_FCOE
		if (dev->features & NETIF_F_FCOE_MTU)
			pf_max_frame = max_t(int, pf_max_frame,
					     IXGBE_FCOE_JUMBO_FRAME_SIZE);

#endif /* CONFIG_FCOE */
		switch (adapter->vfinfo[vf].vf_api) {
		case ixgbe_mbox_api_11:
		case ixgbe_mbox_api_12:
		case ixgbe_mbox_api_13:
		case ixgbe_mbox_api_14:
			/* Version 1.1 supports jumbo frames on VFs if PF has
			 * jumbo frames enabled which means legacy VFs are
			 * disabled
			 */
			if (pf_max_frame > ETH_FRAME_LEN)
				break;
			/* fall through */
		default:
			/* If the PF or VF are running w/ jumbo frames enabled
			 * we need to shut down the VF Rx path as we cannot
			 * support jumbo frames on legacy VFs
			 */
			if ((pf_max_frame > ETH_FRAME_LEN) ||
			    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
				err = -EINVAL;
			break;
		}

		/* determine VF receive enable location */
		vf_shift = vf % 32;
		reg_offset = vf / 32;

		/* enable or disable receive depending on error */
		vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
		if (err)
			vfre &= ~BIT(vf_shift);
		else
			vfre |= BIT(vf_shift);
		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);

		if (err) {
			e_err(drv, "VF max_frame %d out of range\n", max_frame);
			return err;
		}
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
		e_err(drv, "VF max_frame %d out of range\n", max_frame);
		return -EINVAL;
	}

	/* pull current max frame size from hardware */
	max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
	max_frs &= IXGBE_MHADD_MFS_MASK;
	max_frs >>= IXGBE_MHADD_MFS_SHIFT;

	if (max_frs < max_frame) {
		max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
	}

	e_info(hw, "VF requests change max MTU to %d\n", max_frame);

	return 0;
}

static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
	vmolr |= IXGBE_VMOLR_BAM;
	if (aupe)
		vmolr |= IXGBE_VMOLR_AUPE;
	else
		vmolr &= ~IXGBE_VMOLR_AUPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}

static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
}

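/**
 * ixgbe_clear_vf_vlans - clear a VF's pool bit from every VLVF entry
 * @adapter: board private structure
 * @vf: VF index whose VLAN memberships are being removed
 *
 * Walks the shared VLVF table and removes the VF from each entry it is a
 * member of.  When the VF was the last user of an entry the VFTA bit and
 * the VLVF entry itself are also released; a PF-only membership frees the
 * VLVF entry but keeps the VFTA bit set.
 */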
static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlvfb_mask, pool_mask, i;

	/* create mask for VF and other pools */
	pool_mask = ~BIT(VMDQ_P(0) % 32);
	vlvfb_mask = BIT(vf % 32);

	/* post increment loop, covers VLVF_ENTRIES - 1 to 0 */
	for (i = IXGBE_VLVF_ENTRIES; i--;) {
		u32 bits[2], vlvfb, vid, vfta, vlvf;
		u32 word = i * 2 + vf / 32;
		u32 mask;

		vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));

		/* if our bit isn't set we can skip it */
		if (!(vlvfb & vlvfb_mask))
			continue;

		/* clear our bit from vlvfb */
		vlvfb ^= vlvfb_mask;

| 599 | /* create 64b mask to chedk to see if we should clear VLVF */ |
		bits[word % 2] = vlvfb;
		bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));

		/* if other pools are present, just remove ourselves */
		if (bits[(VMDQ_P(0) / 32) ^ 1] ||
		    (bits[VMDQ_P(0) / 32] & pool_mask))
			goto update_vlvfb;

		/* if PF is present, leave VFTA */
		if (bits[0] || bits[1])
			goto update_vlvf;

		/* if we cannot determine VLAN just remove ourselves */
		vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
		if (!vlvf)
			goto update_vlvfb;

		vid = vlvf & VLAN_VID_MASK;
		mask = BIT(vid % 32);

		/* clear bit from VFTA */
		vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
		if (vfta & mask)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask);
update_vlvf:
		/* clear POOL selection enable */
		IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0);

		if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
			vlvfb = 0;
update_vlvfb:
		/* clear pool bits */
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb);
	}
}

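/**
 * ixgbe_set_vf_macvlan - service a VF request for a MACVLAN filter
 * @adapter: board private structure
 * @vf: VF index making the request
 * @index: 0 frees all of the VF's entries, 1 frees them and then adds a
 *	   new filter, anything greater adds an additional filter
 * @mac_addr: MAC address of the filter to add (ignored when clearing)
 *
 * Entries come from the free list carved out in ixgbe_alloc_vf_macvlans();
 * returns -ENOSPC when that list is exhausted or was never allocated.
 */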
static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
				int vf, int index, unsigned char *mac_addr)
{
	struct vf_macvlans *entry;
	struct list_head *pos;
	int retval = 0;

	if (index <= 1) {
		list_for_each(pos, &adapter->vf_mvs.l) {
			entry = list_entry(pos, struct vf_macvlans, l);
			if (entry->vf == vf) {
				entry->vf = -1;
				entry->free = true;
				entry->is_macvlan = false;
				ixgbe_del_mac_filter(adapter,
						     entry->vf_macvlan, vf);
			}
		}
	}

	/*
	 * If index was zero then we were asked to clear the uc list
	 * for the VF.  We're done.
	 */
	if (!index)
		return 0;

	entry = NULL;

	list_for_each(pos, &adapter->vf_mvs.l) {
		entry = list_entry(pos, struct vf_macvlans, l);
		if (entry->free)
			break;
	}

	/*
	 * If we traversed the entire list and didn't find a free entry
	 * then we're out of space on the RAR table.  Also entry may
	 * be NULL because the original memory allocation for the list
	 * failed, which is not fatal but does mean we can't support
	 * VF requests for MACVLAN because we couldn't allocate
	 * memory for the list management required.
	 */
	if (!entry || !entry->free)
		return -ENOSPC;

	retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
	if (retval < 0)
		return retval;

	entry->free = false;
	entry->is_macvlan = true;
	entry->vf = vf;
	memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);

	return 0;
}

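/**
 * ixgbe_vf_reset_event - restore a VF to its post-reset default state
 * @adapter: board private structure
 * @vf: VF index being reset
 *
 * Reinstates the PF-assigned port VLAN (or VLAN 0), restores anti-spoof
 * checking where a port VLAN is in use, clears the VF's multicast, MAC,
 * MACVLAN, and IPsec state, drops the negotiated mailbox API back to 1.0,
 * and cycles the Tx enable bit on each of the VF's configured queues.
 */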
static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	u8 num_tcs = adapter->hw_tcs;
	u32 reg_val;
	u32 queue;

	/* remove VLAN filters belonging to this VF */
	ixgbe_clear_vf_vlans(adapter, vf);

	/* add back PF assigned VLAN or VLAN 0 */
	ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);

	/* reset offloads to defaults */
	ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);

	/* set outgoing tags for VFs */
	if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
		ixgbe_clear_vmvir(adapter, vf);
	} else {
		if (vfinfo->pf_qos || !num_tcs)
			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
					vfinfo->pf_qos, vf);
		else
			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
					adapter->default_up, vf);

		if (vfinfo->spoofchk_enabled) {
			hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
			hw->mac.ops.set_mac_anti_spoofing(hw, true, vf);
		}
	}

	/* reset multicast table array for vf */
	adapter->vfinfo[vf].num_vf_mc_hashes = 0;

	/* clear any ipsec table info */
	ixgbe_ipsec_vf_clear(adapter, vf);

	/* Flush and reset the mta with the new values */
	ixgbe_set_rx_mode(adapter->netdev);

	ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
	ixgbe_set_vf_macvlan(adapter, vf, 0, NULL);

	/* reset VF api back to unknown */
	adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;

	/* Restart each queue for given VF */
	for (queue = 0; queue < q_per_pool; queue++) {
		unsigned int reg_idx = (vf * q_per_pool) + queue;

		reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx));

		/* Re-enabling only configured queues */
		if (reg_val) {
			reg_val |= IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
		}
	}

	IXGBE_WRITE_FLUSH(hw);
}

static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 word;

	/* Clear VF's mailbox memory */
	for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);

	IXGBE_WRITE_FLUSH(hw);
}

static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
			    int vf, unsigned char *mac_addr)
{
	s32 retval;

	ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
	retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
	if (retval >= 0)
		memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr,
		       ETH_ALEN);
	else
		memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN);

	return retval;
}

int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	unsigned int vfn = (event_mask & 0x3f);

	bool enable = ((event_mask & 0x10000000U) != 0);

	if (enable)
		eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);

	return 0;
}

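/**
 * ixgbe_write_qde - set the queue drop enable bits for a VF's queues
 * @adapter: board private structure
 * @vf: VF index whose queues are being updated
 * @qde: QDE bits to apply (IXGBE_QDE_ENABLE and/or IXGBE_QDE_HIDE_VLAN)
 */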
static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
				   u32 qde)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	int i;

	for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
		u32 reg;

		/* flush previous write */
		IXGBE_WRITE_FLUSH(hw);

		/* indicate to hardware that we want to set drop enable */
		reg = IXGBE_QDE_WRITE | qde;
		reg |= i << IXGBE_QDE_IDX_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
	}
}

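/**
 * ixgbe_vf_reset_msg - respond to a VF's reset request over the mailbox
 * @adapter: board private structure
 * @vf: VF index that requested the reset
 *
 * Resets the VF's filters and mailbox, re-enables its Tx/Rx paths (Rx
 * stays off on 82599 while the PF runs jumbo frames, see
 * ixgbe_set_vf_lpe()), and replies with ACK plus the VF's MAC address, or
 * NACK when no PF-assigned address exists, piggybacking the multicast
 * filter type in either case.
 */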
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
	u32 reg, reg_offset, vf_shift;
	u32 msgbuf[4] = {0, 0, 0, 0};
	u8 *addr = (u8 *)(&msgbuf[1]);
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	int i;

	e_info(probe, "VF Reset msg received from vf %d\n", vf);

	/* reset the filters for the device */
	ixgbe_vf_reset_event(adapter, vf);

	ixgbe_vf_clear_mbx(adapter, vf);

	/* set vf mac address */
	if (!is_zero_ether_addr(vf_mac))
		ixgbe_set_vf_mac(adapter, vf, vf_mac);

	vf_shift = vf % 32;
	reg_offset = vf / 32;

	/* enable transmit for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
	reg |= BIT(vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);

	/* force drop enable for all VF Rx queues */
	reg = IXGBE_QDE_ENABLE;
	if (adapter->vfinfo[vf].pf_vlan)
		reg |= IXGBE_QDE_HIDE_VLAN;

	ixgbe_write_qde(adapter, vf, reg);

	/* enable receive for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
	reg |= BIT(vf_shift);
	/*
	 * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
	 * For more info take a look at ixgbe_set_vf_lpe
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		struct net_device *dev = adapter->netdev;
		int pf_max_frame = dev->mtu + ETH_HLEN;

#ifdef CONFIG_FCOE
		if (dev->features & NETIF_F_FCOE_MTU)
			pf_max_frame = max_t(int, pf_max_frame,
					     IXGBE_FCOE_JUMBO_FRAME_SIZE);

#endif /* CONFIG_FCOE */
		if (pf_max_frame > ETH_FRAME_LEN)
			reg &= ~BIT(vf_shift);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);

	/* enable VF mailbox for further messages */
	adapter->vfinfo[vf].clear_to_send = true;

	/* Enable counting of spoofed packets in the SSVPC register */
	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
	reg |= BIT(vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

	/*
	 * Reset the VF's TDWBAL and TDWBAH registers
	 * which are not cleared by an FLR
	 */
	for (i = 0; i < q_per_pool; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
	}

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = IXGBE_VF_RESET;
	if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
		memcpy(addr, vf_mac, ETH_ALEN);
	} else {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
	}

	/*
	 * Piggyback the multicast filter type so VF can compute the
	 * correct vectors
	 */
	msgbuf[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

	return 0;
}

static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	u8 *new_mac = ((u8 *)(&msgbuf[1]));

	if (!is_valid_ether_addr(new_mac)) {
		e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
		return -1;
	}

	if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
	    !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
		e_warn(drv,
		       "VF %d attempted to override administratively set MAC address\n"
		       "Reload the VF driver to resume operations\n",
		       vf);
		return -1;
	}

	return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
}

static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
	u8 tcs = adapter->hw_tcs;

	if (adapter->vfinfo[vf].pf_vlan || tcs) {
		e_warn(drv,
		       "VF %d attempted to override administratively set VLAN configuration\n"
		       "Reload the VF driver to resume operations\n",
		       vf);
		return -1;
	}

	/* VLAN 0 is a special case, don't allow it to be removed */
	if (!vid && !add)
		return 0;

	return ixgbe_set_vf_vlan(adapter, add, vid, vf);
}

| 964 | static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, |
| 965 | u32 *msgbuf, u32 vf) |
| 966 | { |
| 967 | u8 *new_mac = ((u8 *)(&msgbuf[1])); |
| 968 | int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> |
| 969 | IXGBE_VT_MSGINFO_SHIFT; |
| 970 | int err; |
| 971 | |
Ken Cox | a9d2d53 | 2016-11-15 13:00:37 -0600 | [diff] [blame] | 972 | if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && |
| 973 | index > 0) { |
Alexander Duyck | 58a02be | 2012-07-20 08:09:17 +0000 | [diff] [blame] | 974 | e_warn(drv, |
| 975 | "VF %d requested MACVLAN filter but is administratively denied\n", |
| 976 | vf); |
| 977 | return -1; |
| 978 | } |
| 979 | |
| 980 | /* A non-zero index indicates the VF is setting a filter */ |
| 981 | if (index) { |
| 982 | if (!is_valid_ether_addr(new_mac)) { |
| 983 | e_warn(drv, "VF %d attempted to set invalid mac\n", vf); |
| 984 | return -1; |
| 985 | } |
| 986 | |
| 987 | /* |
| 988 | * If the VF is allowed to set MAC filters, then turn off |
| 989 | * anti-spoofing to avoid false positives. |
| 990 | */ |
Emil Tantilov | 77f192a | 2016-03-18 16:11:14 -0700 | [diff] [blame] | 991 | if (adapter->vfinfo[vf].spoofchk_enabled) { |
| 992 | struct ixgbe_hw *hw = &adapter->hw; |
| 993 | |
| 994 | hw->mac.ops.set_mac_anti_spoofing(hw, false, vf); |
Emil Tantilov | 581e0c7 | 2016-06-01 18:59:44 -0700 | [diff] [blame] | 995 | hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); |
Emil Tantilov | 77f192a | 2016-03-18 16:11:14 -0700 | [diff] [blame] | 996 | } |
Alexander Duyck | 58a02be | 2012-07-20 08:09:17 +0000 | [diff] [blame] | 997 | } |
| 998 | |
| 999 | err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac); |
| 1000 | if (err == -ENOSPC) |
| 1001 | e_warn(drv, |
| 1002 | "VF %d has requested a MACVLAN filter but there is no space for it\n", |
| 1003 | vf); |
Greg Rose | a301340 | 2012-10-30 00:40:02 +0000 | [diff] [blame] | 1004 | |
| 1005 | return err < 0; |
Greg Rose | 1736727 | 2010-01-09 02:25:48 +0000 | [diff] [blame] | 1006 | } |
| 1007 | |
Alexander Duyck | 374c65d | 2012-07-20 08:09:22 +0000 | [diff] [blame] | 1008 | static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, |
| 1009 | u32 *msgbuf, u32 vf) |
| 1010 | { |
| 1011 | int api = msgbuf[1]; |
| 1012 | |
| 1013 | switch (api) { |
| 1014 | case ixgbe_mbox_api_10: |
Alexander Duyck | bffb3bc | 2012-07-20 08:09:37 +0000 | [diff] [blame] | 1015 | case ixgbe_mbox_api_11: |
Vlad Zolotarov | 4ce37a4 | 2015-04-01 11:24:54 +0300 | [diff] [blame] | 1016 | case ixgbe_mbox_api_12: |
Don Skidmore | 07eea57 | 2016-12-15 21:18:32 -0500 | [diff] [blame] | 1017 | case ixgbe_mbox_api_13: |
Shannon Nelson | 7269824 | 2018-08-13 11:43:42 -0700 | [diff] [blame] | 1018 | case ixgbe_mbox_api_14: |
Alexander Duyck | 374c65d | 2012-07-20 08:09:22 +0000 | [diff] [blame] | 1019 | adapter->vfinfo[vf].vf_api = api; |
| 1020 | return 0; |
| 1021 | default: |
| 1022 | break; |
| 1023 | } |
| 1024 | |
| 1025 | e_info(drv, "VF %d requested invalid api version %u\n", vf, api); |
| 1026 | |
| 1027 | return -1; |
| 1028 | } |
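/*
 * Illustrative sketch of the VF side of the negotiation handled above,
 * modeled on the ixgbevf driver; vf_write_mbx()/vf_read_mbx() are
 * hypothetical stand-ins for the VF driver's posted mailbox helpers,
 * not functions in this file.
 */
static int example_vf_negotiate_api(struct ixgbe_hw *hw, int api)
{
	u32 msg[2];
	int err;

	msg[0] = IXGBE_VF_API_NEGOTIATE;
	msg[1] = api;

	err = vf_write_mbx(hw, msg, 2);
	if (!err)
		err = vf_read_mbx(hw, msg, 2);
	if (err)
		return err;

	msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* the PF replies with the message type plus ACK on success */
	if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK))
		return 0;

	return -EINVAL;		/* caller falls back to an older version */
}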
| 1029 | |
Alexander Duyck | f591cd9 | 2012-07-20 08:09:32 +0000 | [diff] [blame] | 1030 | static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, |
| 1031 | u32 *msgbuf, u32 vf) |
| 1032 | { |
| 1033 | struct net_device *dev = adapter->netdev; |
| 1034 | struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; |
| 1035 | unsigned int default_tc = 0; |
Alexander Duyck | 0efbf12 | 2017-11-22 10:57:11 -0800 | [diff] [blame] | 1036 | u8 num_tcs = adapter->hw_tcs; |
Alexander Duyck | f591cd9 | 2012-07-20 08:09:32 +0000 | [diff] [blame] | 1037 | |
| 1038 | /* verify the PF supports the correct APIs */ |
| 1039 | switch (adapter->vfinfo[vf].vf_api) { |
| 1040 | case ixgbe_mbox_api_20: |
| 1041 | case ixgbe_mbox_api_11: |
Vlad Zolotarov | 4ce37a4 | 2015-04-01 11:24:54 +0300 | [diff] [blame] | 1042 | case ixgbe_mbox_api_12: |
Don Skidmore | 07eea57 | 2016-12-15 21:18:32 -0500 | [diff] [blame] | 1043 | case ixgbe_mbox_api_13: |
Shannon Nelson | 7269824 | 2018-08-13 11:43:42 -0700 | [diff] [blame] | 1044 | case ixgbe_mbox_api_14: |
Alexander Duyck | f591cd9 | 2012-07-20 08:09:32 +0000 | [diff] [blame] | 1045 | break; |
| 1046 | default: |
| 1047 | return -1; |
| 1048 | } |
| 1049 | |
| 1050 | /* only allow 1 Tx queue for bandwidth limiting */ |
| 1051 | msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); |
| 1052 | msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); |
| 1053 | |
| 1054 | /* if TCs > 1, determine which TC the default user priority maps to */ |
| 1055 | if (num_tcs > 1) |
| 1056 | default_tc = netdev_get_prio_tc_map(dev, adapter->default_up); |
| 1057 | |
| 1058 | /* notify VF of need for VLAN tag stripping, and correct queue */ |
| 1059 | if (num_tcs) |
| 1060 | msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs; |
| 1061 | else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos) |
| 1062 | msgbuf[IXGBE_VF_TRANS_VLAN] = 1; |
| 1063 | else |
| 1064 | msgbuf[IXGBE_VF_TRANS_VLAN] = 0; |
| 1065 | |
| 1066 | /* notify VF of default queue */ |
| 1067 | msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc; |
| 1068 | |
| 1069 | return 0; |
| 1070 | } |
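/*
 * Worked example (sketch): __ALIGN_MASK(x, mask) is the kernel's
 * ((x + mask) & ~mask), so the queue-count lines above round 1 up to the
 * pool size encoded in the VMDq mask. Assuming the 82599 "4 queues per
 * pool" mask value 0x7C:
 *
 *   __ALIGN_MASK(1, ~0x7C) = (1 + 0xffffff83) & 0x7c = 4
 *
 * and the 8-queue mask 0x78 yields 8 the same way, so the VF is told it
 * owns 4 (or 8) Tx/Rx queues.
 */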
| 1071 | |
Vlad Zolotarov | 4ce37a4 | 2015-04-01 11:24:54 +0300 | [diff] [blame] | 1072 | static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) |
| 1073 | { |
| 1074 | u32 i, j; |
| 1075 | u32 *out_buf = &msgbuf[1]; |
| 1076 | const u8 *reta = adapter->rss_indir_tbl; |
| 1077 | u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter); |
| 1078 | |
| 1079 | /* Check if operation is permitted */ |
| 1080 | if (!adapter->vfinfo[vf].rss_query_enabled) |
| 1081 | return -EPERM; |
| 1082 | |
| 1083 | /* verify the PF supports the correct API */ |
Don Skidmore | 07eea57 | 2016-12-15 21:18:32 -0500 | [diff] [blame] | 1084 | switch (adapter->vfinfo[vf].vf_api) { |
Shannon Nelson | 7269824 | 2018-08-13 11:43:42 -0700 | [diff] [blame] | 1085 | case ixgbe_mbox_api_14: |
Don Skidmore | 07eea57 | 2016-12-15 21:18:32 -0500 | [diff] [blame] | 1086 | case ixgbe_mbox_api_13: |
| 1087 | case ixgbe_mbox_api_12: |
| 1088 | break; |
| 1089 | default: |
Vlad Zolotarov | 4ce37a4 | 2015-04-01 11:24:54 +0300 | [diff] [blame] | 1090 | return -EOPNOTSUPP; |
Don Skidmore | 07eea57 | 2016-12-15 21:18:32 -0500 | [diff] [blame] | 1091 | } |
Vlad Zolotarov | 4ce37a4 | 2015-04-01 11:24:54 +0300 | [diff] [blame] | 1092 | |
| 1093 | /* This mailbox command is supported (required) only for 82599 and x540 |
| 1094 | * VFs which support up to 4 RSS queues. Therefore we will compress the |
| 1095 | * RETA by saving only 2 bits from each entry. This way we will be able |
| 1096 | * to transfer the whole RETA in a single mailbox operation. |
| 1097 | */ |
| 1098 | for (i = 0; i < reta_size / 16; i++) { |
| 1099 | out_buf[i] = 0; |
| 1100 | for (j = 0; j < 16; j++) |
| 1101 | out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j); |
| 1102 | } |
| 1103 | |
| 1104 | return 0; |
| 1105 | } |
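/*
 * Illustrative sketch (assumption, not driver code): the VF side can
 * expand the 2-bit-packed RETA produced above back into per-entry RSS
 * queue indices, 16 entries per 32-bit word.
 */
static void example_vf_unpack_reta(const u32 *in_buf, u8 *reta,
				   u32 reta_size)
{
	u32 i, j;

	for (i = 0; i < reta_size / 16; i++)
		for (j = 0; j < 16; j++)
			reta[16 * i + j] = (in_buf[i] >> (2 * j)) & 0x3;
}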
| 1106 | |
Vlad Zolotarov | 3c0841a | 2015-03-30 21:35:27 +0300 | [diff] [blame] | 1107 | static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, |
| 1108 | u32 *msgbuf, u32 vf) |
| 1109 | { |
| 1110 | u32 *rss_key = &msgbuf[1]; |
| 1111 | |
| 1112 | /* Check if the operation is permitted */ |
| 1113 | if (!adapter->vfinfo[vf].rss_query_enabled) |
| 1114 | return -EPERM; |
| 1115 | |
| 1116 | /* verify the PF supports the correct API */ |
Don Skidmore | 07eea57 | 2016-12-15 21:18:32 -0500 | [diff] [blame] | 1117 | switch (adapter->vfinfo[vf].vf_api) { |
Shannon Nelson | 7269824 | 2018-08-13 11:43:42 -0700 | [diff] [blame] | 1118 | case ixgbe_mbox_api_14: |
Don Skidmore | 07eea57 | 2016-12-15 21:18:32 -0500 | [diff] [blame] | 1119 | case ixgbe_mbox_api_13: |
| 1120 | case ixgbe_mbox_api_12: |
| 1121 | break; |
| 1122 | default: |
Vlad Zolotarov | 3c0841a | 2015-03-30 21:35:27 +0300 | [diff] [blame] | 1123 | return -EOPNOTSUPP; |
Don Skidmore | 07eea57 | 2016-12-15 21:18:32 -0500 | [diff] [blame] | 1124 | } |
Vlad Zolotarov | 3c0841a | 2015-03-30 21:35:27 +0300 | [diff] [blame] | 1125 | |
Tony Nguyen | 3dfbfc7 | 2017-04-13 07:26:05 -0700 | [diff] [blame] | 1126 | memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE); |
Vlad Zolotarov | 3c0841a | 2015-03-30 21:35:27 +0300 | [diff] [blame] | 1127 | |
| 1128 | return 0; |
| 1129 | } |
| 1130 | |
Hiroshi Shimamoto | 8443c1a4 | 2015-08-28 06:59:03 +0000 | [diff] [blame] | 1131 | static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, |
| 1132 | u32 *msgbuf, u32 vf) |
| 1133 | { |
| 1134 | struct ixgbe_hw *hw = &adapter->hw; |
| 1135 | int xcast_mode = msgbuf[1]; |
Don Skidmore | 07eea57 | 2016-12-15 21:18:32 -0500 | [diff] [blame] | 1136 | u32 vmolr, fctrl, disable, enable; |
Hiroshi Shimamoto | 8443c1a4 | 2015-08-28 06:59:03 +0000 | [diff] [blame] | 1137 | |
| 1138 | /* verify the PF supports the correct APIs */ |
| 1139 | switch (adapter->vfinfo[vf].vf_api) { |
| 1140 | case ixgbe_mbox_api_12: |
Don Skidmore | 07eea57 | 2016-12-15 21:18:32 -0500 | [diff] [blame] | 1141 | /* promisc mode was introduced in mailbox API version 1.3 */ |
| 1142 | if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) |
| 1143 | return -EOPNOTSUPP; |
Shannon Nelson | 7269824 | 2018-08-13 11:43:42 -0700 | [diff] [blame] | 1144 | /* Fall through */ |
Don Skidmore | 07eea57 | 2016-12-15 21:18:32 -0500 | [diff] [blame] | 1145 | case ixgbe_mbox_api_13: |
Shannon Nelson | 7269824 | 2018-08-13 11:43:42 -0700 | [diff] [blame] | 1146 | case ixgbe_mbox_api_14: |
Hiroshi Shimamoto | 8443c1a4 | 2015-08-28 06:59:03 +0000 | [diff] [blame] | 1147 | break; |
| 1148 | default: |
| 1149 | return -EOPNOTSUPP; |
| 1150 | } |
| 1151 | |
| 1152 | if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI && |
| 1153 | !adapter->vfinfo[vf].trusted) { |
| 1154 | xcast_mode = IXGBEVF_XCAST_MODE_MULTI; |
| 1155 | } |
| 1156 | |
| 1157 | if (adapter->vfinfo[vf].xcast_mode == xcast_mode) |
| 1158 | goto out; |
| 1159 | |
| 1160 | switch (xcast_mode) { |
| 1161 | case IXGBEVF_XCAST_MODE_NONE: |
Don Skidmore | 07eea57 | 2016-12-15 21:18:32 -0500 | [diff] [blame] | 1162 | disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | |
| 1163 | IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; |
Hiroshi Shimamoto | 8443c1a4 | 2015-08-28 06:59:03 +0000 | [diff] [blame] | 1164 | enable = 0; |
| 1165 | break; |
| 1166 | case IXGBEVF_XCAST_MODE_MULTI: |
Don Skidmore | 07eea57 | 2016-12-15 21:18:32 -0500 | [diff] [blame] | 1167 | disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; |
Hiroshi Shimamoto | 8443c1a4 | 2015-08-28 06:59:03 +0000 | [diff] [blame] | 1168 | enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE; |
| 1169 | break; |
| 1170 | case IXGBEVF_XCAST_MODE_ALLMULTI: |
Don Skidmore | 07eea57 | 2016-12-15 21:18:32 -0500 | [diff] [blame] | 1171 | disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; |
Hiroshi Shimamoto | 8443c1a4 | 2015-08-28 06:59:03 +0000 | [diff] [blame] | 1172 | enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; |
| 1173 | break; |
Don Skidmore | 07eea57 | 2016-12-15 21:18:32 -0500 | [diff] [blame] | 1174 | case IXGBEVF_XCAST_MODE_PROMISC: |
| 1175 | if (hw->mac.type <= ixgbe_mac_82599EB) |
| 1176 | return -EOPNOTSUPP; |
| 1177 | |
| 1178 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); |
| 1179 | if (!(fctrl & IXGBE_FCTRL_UPE)) { |
| 1180 | /* VF promisc requires PF in promisc */ |
| 1181 | e_warn(drv, |
| 1182 | "Enabling VF promisc requires PF in promisc\n"); |
| 1183 | return -EPERM; |
| 1184 | } |
| 1185 | |
| 1186 | disable = 0; |
| 1187 | enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | |
| 1188 | IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; |
| 1189 | break; |
Hiroshi Shimamoto | 8443c1a4 | 2015-08-28 06:59:03 +0000 | [diff] [blame] | 1190 | default: |
| 1191 | return -EOPNOTSUPP; |
| 1192 | } |
| 1193 | |
| 1194 | vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); |
| 1195 | vmolr &= ~disable; |
| 1196 | vmolr |= enable; |
| 1197 | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); |
| 1198 | |
| 1199 | adapter->vfinfo[vf].xcast_mode = xcast_mode; |
| 1200 | |
| 1201 | out: |
| 1202 | msgbuf[1] = xcast_mode; |
| 1203 | |
| 1204 | return 0; |
| 1205 | } |
| 1206 | |
Greg Rose | 1736727 | 2010-01-09 02:25:48 +0000 | [diff] [blame] | 1207 | static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) |
| 1208 | { |
| 1209 | u32 mbx_size = IXGBE_VFMAILBOX_SIZE; |
Emil Tantilov | c050999 | 2011-05-07 06:49:18 +0000 | [diff] [blame] | 1210 | u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; |
Greg Rose | 1736727 | 2010-01-09 02:25:48 +0000 | [diff] [blame] | 1211 | struct ixgbe_hw *hw = &adapter->hw; |
| 1212 | s32 retval; |
Greg Rose | 1736727 | 2010-01-09 02:25:48 +0000 | [diff] [blame] | 1213 | |
| 1214 | retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); |
| 1215 | |
Alexander Duyck | dcaccc8 | 2012-03-28 08:03:38 +0000 | [diff] [blame] | 1216 | if (retval) { |
Emil Tantilov | 849c454 | 2010-06-03 16:53:41 +0000 | [diff] [blame] | 1217 | pr_err("Error receiving message from VF\n"); |
Alexander Duyck | dcaccc8 | 2012-03-28 08:03:38 +0000 | [diff] [blame] | 1218 | return retval; |
| 1219 | } |
Greg Rose | 1736727 | 2010-01-09 02:25:48 +0000 | [diff] [blame] | 1220 | |
| 1221 | /* this is a message we already processed; do nothing */ |
| 1222 | if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) |
Mark Rustad | e90dd26 | 2014-07-22 06:51:08 +0000 | [diff] [blame] | 1223 | return 0; |
Greg Rose | 1736727 | 2010-01-09 02:25:48 +0000 | [diff] [blame] | 1224 | |
Alexander Duyck | dcaccc8 | 2012-03-28 08:03:38 +0000 | [diff] [blame] | 1225 | /* flush the ack before we write any messages back */ |
| 1226 | IXGBE_WRITE_FLUSH(hw); |
| 1227 | |
Alexander Duyck | 374c65d | 2012-07-20 08:09:22 +0000 | [diff] [blame] | 1228 | if (msgbuf[0] == IXGBE_VF_RESET) |
| 1229 | return ixgbe_vf_reset_msg(adapter, vf); |
| 1230 | |
Greg Rose | 1736727 | 2010-01-09 02:25:48 +0000 | [diff] [blame] | 1231 | /* |
| 1232 | * Until the VF completes a reset it should not be allowed to |
| 1233 | * start any configuration. |
| 1234 | */ |
Greg Rose | 1736727 | 2010-01-09 02:25:48 +0000 | [diff] [blame] | 1235 | if (!adapter->vfinfo[vf].clear_to_send) { |
| 1236 | msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; |
| 1237 | ixgbe_write_mbx(hw, msgbuf, 1, vf); |
Mark Rustad | e90dd26 | 2014-07-22 06:51:08 +0000 | [diff] [blame] | 1238 | return 0; |
Greg Rose | 1736727 | 2010-01-09 02:25:48 +0000 | [diff] [blame] | 1239 | } |
| 1240 | |
| 1241 | switch ((msgbuf[0] & 0xFFFF)) { |
| 1242 | case IXGBE_VF_SET_MAC_ADDR: |
Alexander Duyck | 58a02be | 2012-07-20 08:09:17 +0000 | [diff] [blame] | 1243 | retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf); |
Greg Rose | 1736727 | 2010-01-09 02:25:48 +0000 | [diff] [blame] | 1244 | break; |
| 1245 | case IXGBE_VF_SET_MULTICAST: |
Alexander Duyck | 58a02be | 2012-07-20 08:09:17 +0000 | [diff] [blame] | 1246 | retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf); |
| 1247 | break; |
| 1248 | case IXGBE_VF_SET_VLAN: |
| 1249 | retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf); |
Greg Rose | 1736727 | 2010-01-09 02:25:48 +0000 | [diff] [blame] | 1250 | break; |
| 1251 | case IXGBE_VF_SET_LPE: |
Alexander Duyck | 872844d | 2012-08-15 02:10:43 +0000 | [diff] [blame] | 1252 | retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf); |
Greg Rose | 1736727 | 2010-01-09 02:25:48 +0000 | [diff] [blame] | 1253 | break; |
Greg Rose | a1cbb15c | 2011-05-13 01:33:48 +0000 | [diff] [blame] | 1254 | case IXGBE_VF_SET_MACVLAN: |
Alexander Duyck | 58a02be | 2012-07-20 08:09:17 +0000 | [diff] [blame] | 1255 | retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf); |
Greg Rose | a1cbb15c | 2011-05-13 01:33:48 +0000 | [diff] [blame] | 1256 | break; |
Alexander Duyck | 374c65d | 2012-07-20 08:09:22 +0000 | [diff] [blame] | 1257 | case IXGBE_VF_API_NEGOTIATE: |
| 1258 | retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf); |
| 1259 | break; |
Alexander Duyck | f591cd9 | 2012-07-20 08:09:32 +0000 | [diff] [blame] | 1260 | case IXGBE_VF_GET_QUEUES: |
| 1261 | retval = ixgbe_get_vf_queues(adapter, msgbuf, vf); |
| 1262 | break; |
Vlad Zolotarov | 4ce37a4 | 2015-04-01 11:24:54 +0300 | [diff] [blame] | 1263 | case IXGBE_VF_GET_RETA: |
| 1264 | retval = ixgbe_get_vf_reta(adapter, msgbuf, vf); |
| 1265 | break; |
Vlad Zolotarov | 3c0841a | 2015-03-30 21:35:27 +0300 | [diff] [blame] | 1266 | case IXGBE_VF_GET_RSS_KEY: |
| 1267 | retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf); |
| 1268 | break; |
Hiroshi Shimamoto | 8443c1a4 | 2015-08-28 06:59:03 +0000 | [diff] [blame] | 1269 | case IXGBE_VF_UPDATE_XCAST_MODE: |
| 1270 | retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); |
| 1271 | break; |
Shannon Nelson | 7269824 | 2018-08-13 11:43:42 -0700 | [diff] [blame] | 1272 | case IXGBE_VF_IPSEC_ADD: |
| 1273 | retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf); |
| 1274 | break; |
| 1275 | case IXGBE_VF_IPSEC_DEL: |
| 1276 | retval = ixgbe_ipsec_vf_del_sa(adapter, msgbuf, vf); |
| 1277 | break; |
Greg Rose | 1736727 | 2010-01-09 02:25:48 +0000 | [diff] [blame] | 1278 | default: |
Emil Tantilov | 396e799 | 2010-07-01 20:05:12 +0000 | [diff] [blame] | 1279 | e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); |
Greg Rose | 1736727 | 2010-01-09 02:25:48 +0000 | [diff] [blame] | 1280 | retval = IXGBE_ERR_MBX; |
| 1281 | break; |
| 1282 | } |
| 1283 | |
| 1284 | /* notify the VF of the results of what it sent us */ |
| 1285 | if (retval) |
| 1286 | msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; |
| 1287 | else |
| 1288 | msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; |
| 1289 | |
| 1290 | msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; |
| 1291 | |
Alexander Duyck | 374c65d | 2012-07-20 08:09:22 +0000 | [diff] [blame] | 1292 | ixgbe_write_mbx(hw, msgbuf, mbx_size, vf); |
Greg Rose | 1736727 | 2010-01-09 02:25:48 +0000 | [diff] [blame] | 1293 | |
| 1294 | return retval; |
| 1295 | } |
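/*
 * Mailbox word layout assumed by the dispatcher above (per the usual
 * ixgbe_mbx.h defines): bits 15:0 carry the message type, bits 23:16
 * (IXGBE_VT_MSGINFO) carry per-message arguments such as the MACVLAN
 * index, and bits 31:29 carry the ACK/NACK/CTS status flags set in the
 * reply.
 */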
| 1296 | |
| 1297 | static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) |
| 1298 | { |
| 1299 | struct ixgbe_hw *hw = &adapter->hw; |
| 1300 | u32 msg = IXGBE_VT_MSGTYPE_NACK; |
| 1301 | |
| 1302 | /* if the device isn't clear to send, it shouldn't be reading either */ |
| 1303 | if (!adapter->vfinfo[vf].clear_to_send) |
| 1304 | ixgbe_write_mbx(hw, &msg, 1, vf); |
| 1305 | } |
| 1306 | |
| 1307 | void ixgbe_msg_task(struct ixgbe_adapter *adapter) |
| 1308 | { |
| 1309 | struct ixgbe_hw *hw = &adapter->hw; |
| 1310 | u32 vf; |
| 1311 | |
| 1312 | for (vf = 0; vf < adapter->num_vfs; vf++) { |
| 1313 | /* process any reset requests */ |
| 1314 | if (!ixgbe_check_for_rst(hw, vf)) |
| 1315 | ixgbe_vf_reset_event(adapter, vf); |
| 1316 | |
| 1317 | /* process any messages pending */ |
| 1318 | if (!ixgbe_check_for_msg(hw, vf)) |
| 1319 | ixgbe_rcv_msg_from_vf(adapter, vf); |
| 1320 | |
| 1321 | /* process any acks */ |
| 1322 | if (!ixgbe_check_for_ack(hw, vf)) |
| 1323 | ixgbe_rcv_ack_from_vf(adapter, vf); |
| 1324 | } |
| 1325 | } |
| 1326 | |
Greg Rose | 767081a | 2010-01-22 22:46:40 +0000 | [diff] [blame] | 1327 | void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) |
| 1328 | { |
| 1329 | struct ixgbe_hw *hw = &adapter->hw; |
| 1330 | |
| 1331 | /* disable transmit and receive for all VFs */ |
| 1332 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0); |
| 1333 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0); |
| 1334 | |
| 1335 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0); |
| 1336 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); |
| 1337 | } |
| 1338 | |
Hiroshi Shimamoto | 54011e4 | 2015-08-28 06:58:33 +0000 | [diff] [blame] | 1339 | static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) |
| 1340 | { |
| 1341 | struct ixgbe_hw *hw = &adapter->hw; |
| 1342 | u32 ping; |
| 1343 | |
| 1344 | ping = IXGBE_PF_CONTROL_MSG; |
| 1345 | if (adapter->vfinfo[vf].clear_to_send) |
| 1346 | ping |= IXGBE_VT_MSGTYPE_CTS; |
| 1347 | ixgbe_write_mbx(hw, &ping, 1, vf); |
| 1348 | } |
| 1349 | |
Greg Rose | 767081a | 2010-01-22 22:46:40 +0000 | [diff] [blame] | 1350 | void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) |
| 1351 | { |
| 1352 | int i; |
| 1353 | |
| 1354 | /* reuse the single-VF helper above */ |
| 1355 | for (i = 0; i < adapter->num_vfs; i++) |
| 1356 | ixgbe_ping_vf(adapter, i); |
| 1357 | } |
| 1363 | |
Greg Rose | 7f01648 | 2010-05-04 22:12:06 +0000 | [diff] [blame] | 1364 | int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) |
| 1365 | { |
| 1366 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
Tony Nguyen | 6af3d0f | 2017-04-28 12:42:03 -0700 | [diff] [blame] | 1367 | s32 retval; |
Tony Nguyen | 27bdc44 | 2017-04-12 13:35:22 -0700 | [diff] [blame] | 1368 | |
| 1369 | if (vf >= adapter->num_vfs) |
Greg Rose | 7f01648 | 2010-05-04 22:12:06 +0000 | [diff] [blame] | 1370 | return -EINVAL; |
Tony Nguyen | 27bdc44 | 2017-04-12 13:35:22 -0700 | [diff] [blame] | 1371 | |
Tony Nguyen | 6af3d0f | 2017-04-28 12:42:03 -0700 | [diff] [blame] | 1372 | if (is_valid_ether_addr(mac)) { |
Tony Nguyen | 27bdc44 | 2017-04-12 13:35:22 -0700 | [diff] [blame] | 1373 | dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", |
| 1374 | mac, vf); |
| 1375 | dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective.\n"); |
Tony Nguyen | 6af3d0f | 2017-04-28 12:42:03 -0700 | [diff] [blame] | 1376 | |
| 1377 | retval = ixgbe_set_vf_mac(adapter, vf, mac); |
| 1378 | if (retval >= 0) { |
| 1379 | adapter->vfinfo[vf].pf_set_mac = true; |
| 1380 | |
| 1381 | if (test_bit(__IXGBE_DOWN, &adapter->state)) { |
| 1382 | dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n"); |
| 1383 | dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n"); |
| 1384 | } |
| 1385 | } else { |
| 1386 | dev_warn(&adapter->pdev->dev, "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n"); |
| 1387 | } |
| 1388 | } else if (is_zero_ether_addr(mac)) { |
| 1389 | unsigned char *vf_mac_addr = |
| 1390 | adapter->vfinfo[vf].vf_mac_addresses; |
| 1391 | |
| 1392 | /* nothing to do */ |
| 1393 | if (is_zero_ether_addr(vf_mac_addr)) |
| 1394 | return 0; |
| 1395 | |
| 1396 | dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf); |
| 1397 | |
| 1398 | retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf); |
| 1399 | if (retval >= 0) { |
| 1400 | adapter->vfinfo[vf].pf_set_mac = false; |
| 1401 | memcpy(vf_mac_addr, mac, ETH_ALEN); |
| 1402 | } else { |
| 1403 | dev_warn(&adapter->pdev->dev, "Could NOT remove the VF MAC address.\n"); |
Tony Nguyen | 27bdc44 | 2017-04-12 13:35:22 -0700 | [diff] [blame] | 1404 | } |
| 1405 | } else { |
Tony Nguyen | 6af3d0f | 2017-04-28 12:42:03 -0700 | [diff] [blame] | 1406 | retval = -EINVAL; |
Greg Rose | 7f01648 | 2010-05-04 22:12:06 +0000 | [diff] [blame] | 1407 | } |
Tony Nguyen | 27bdc44 | 2017-04-12 13:35:22 -0700 | [diff] [blame] | 1408 | |
Tony Nguyen | 6af3d0f | 2017-04-28 12:42:03 -0700 | [diff] [blame] | 1409 | return retval; |
Greg Rose | 7f01648 | 2010-05-04 22:12:06 +0000 | [diff] [blame] | 1410 | } |
| 1411 | |
Don Skidmore | 2b509c0 | 2014-11-01 01:06:57 +0000 | [diff] [blame] | 1412 | static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, |
| 1413 | u16 vlan, u8 qos) |
| 1414 | { |
| 1415 | struct ixgbe_hw *hw = &adapter->hw; |
Emil Tantilov | 42ce2c8 | 2014-12-10 05:28:51 +0000 | [diff] [blame] | 1416 | int err; |
Don Skidmore | 2b509c0 | 2014-11-01 01:06:57 +0000 | [diff] [blame] | 1417 | |
Emil Tantilov | 42ce2c8 | 2014-12-10 05:28:51 +0000 | [diff] [blame] | 1418 | err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); |
Don Skidmore | 2b509c0 | 2014-11-01 01:06:57 +0000 | [diff] [blame] | 1419 | if (err) |
| 1420 | goto out; |
Emil Tantilov | 42ce2c8 | 2014-12-10 05:28:51 +0000 | [diff] [blame] | 1421 | |
Alexander Duyck | 4c7f35f | 2015-11-02 17:10:32 -0800 | [diff] [blame] | 1422 | /* Revoke tagless access via VLAN 0 */ |
| 1423 | ixgbe_set_vf_vlan(adapter, false, 0, vf); |
| 1424 | |
Don Skidmore | 2b509c0 | 2014-11-01 01:06:57 +0000 | [diff] [blame] | 1425 | ixgbe_set_vmvir(adapter, vlan, qos, vf); |
| 1426 | ixgbe_set_vmolr(hw, vf, false); |
Don Skidmore | 9a75a1a | 2014-11-07 03:53:35 +0000 | [diff] [blame] | 1427 | |
| 1428 | /* enable hide VLAN on X550 */ |
| 1429 | if (hw->mac.type >= ixgbe_mac_X550) |
| 1430 | ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE | |
| 1431 | IXGBE_QDE_HIDE_VLAN); |
| 1432 | |
Don Skidmore | 2b509c0 | 2014-11-01 01:06:57 +0000 | [diff] [blame] | 1433 | adapter->vfinfo[vf].pf_vlan = vlan; |
| 1434 | adapter->vfinfo[vf].pf_qos = qos; |
| 1435 | dev_info(&adapter->pdev->dev, |
| 1436 | "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); |
| 1437 | if (test_bit(__IXGBE_DOWN, &adapter->state)) { |
| 1438 | dev_warn(&adapter->pdev->dev, |
| 1439 | "The VF VLAN has been set, but the PF device is not up.\n"); |
| 1440 | dev_warn(&adapter->pdev->dev, |
| 1441 | "Bring the PF device up before attempting to use the VF device.\n"); |
| 1442 | } |
| 1443 | |
| 1444 | out: |
| 1445 | return err; |
| 1446 | } |
| 1447 | |
| 1448 | static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) |
| 1449 | { |
| 1450 | struct ixgbe_hw *hw = &adapter->hw; |
| 1451 | int err; |
| 1452 | |
| 1453 | err = ixgbe_set_vf_vlan(adapter, false, |
| 1454 | adapter->vfinfo[vf].pf_vlan, vf); |
Alexander Duyck | 4c7f35f | 2015-11-02 17:10:32 -0800 | [diff] [blame] | 1455 | /* Restore tagless access via VLAN 0 */ |
| 1456 | ixgbe_set_vf_vlan(adapter, true, 0, vf); |
Don Skidmore | 2b509c0 | 2014-11-01 01:06:57 +0000 | [diff] [blame] | 1457 | ixgbe_clear_vmvir(adapter, vf); |
| 1458 | ixgbe_set_vmolr(hw, vf, true); |
Emil Tantilov | 42ce2c8 | 2014-12-10 05:28:51 +0000 | [diff] [blame] | 1459 | |
| 1460 | /* disable hide VLAN on X550 */ |
| 1461 | if (hw->mac.type >= ixgbe_mac_X550) |
| 1462 | ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE); |
| 1463 | |
Don Skidmore | 2b509c0 | 2014-11-01 01:06:57 +0000 | [diff] [blame] | 1464 | adapter->vfinfo[vf].pf_vlan = 0; |
| 1465 | adapter->vfinfo[vf].pf_qos = 0; |
| 1466 | |
| 1467 | return err; |
| 1468 | } |
| 1469 | |
Moshe Shemesh | 79aab09 | 2016-09-22 12:11:15 +0300 | [diff] [blame] | 1470 | int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, |
| 1471 | u8 qos, __be16 vlan_proto) |
Greg Rose | 7f01648 | 2010-05-04 22:12:06 +0000 | [diff] [blame] | 1472 | { |
| 1473 | int err = 0; |
| 1474 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
| 1475 | |
| 1476 | if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) |
| 1477 | return -EINVAL; |
Moshe Shemesh | 79aab09 | 2016-09-22 12:11:15 +0300 | [diff] [blame] | 1478 | if (vlan_proto != htons(ETH_P_8021Q)) |
| 1479 | return -EPROTONOSUPPORT; |
Greg Rose | 7f01648 | 2010-05-04 22:12:06 +0000 | [diff] [blame] | 1480 | if (vlan || qos) { |
Don Skidmore | 2b509c0 | 2014-11-01 01:06:57 +0000 | [diff] [blame] | 1481 | /* Check if a port VLAN is already set; if so, delete |
| 1482 | * the old one before setting the new one. The usage |
| 1483 | * model previously assumed the user would delete the |
| 1484 | * old port VLAN before setting a new one, but that is |
| 1485 | * not necessarily the case. |
| 1486 | */ |
Greg Rose | 026ac67 | 2013-04-17 20:41:35 +0000 | [diff] [blame] | 1488 | if (adapter->vfinfo[vf].pf_vlan) |
Don Skidmore | 2b509c0 | 2014-11-01 01:06:57 +0000 | [diff] [blame] | 1489 | err = ixgbe_disable_port_vlan(adapter, vf); |
Greg Rose | 026ac67 | 2013-04-17 20:41:35 +0000 | [diff] [blame] | 1490 | if (err) |
| 1491 | goto out; |
Don Skidmore | 2b509c0 | 2014-11-01 01:06:57 +0000 | [diff] [blame] | 1492 | err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos); |
Greg Rose | 7f01648 | 2010-05-04 22:12:06 +0000 | [diff] [blame] | 1493 | } else { |
Don Skidmore | 2b509c0 | 2014-11-01 01:06:57 +0000 | [diff] [blame] | 1494 | err = ixgbe_disable_port_vlan(adapter, vf); |
Jacob Keller | e7cf745 | 2014-04-09 06:03:10 +0000 | [diff] [blame] | 1495 | } |
Don Skidmore | 2b509c0 | 2014-11-01 01:06:57 +0000 | [diff] [blame] | 1496 | |
Greg Rose | 7f01648 | 2010-05-04 22:12:06 +0000 | [diff] [blame] | 1497 | out: |
Jacob Keller | e7cf745 | 2014-04-09 06:03:10 +0000 | [diff] [blame] | 1498 | return err; |
Greg Rose | 7f01648 | 2010-05-04 22:12:06 +0000 | [diff] [blame] | 1499 | } |
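/*
 * Usage example (host side, for illustration): the port VLAN handled
 * above is normally configured through iproute2, which reaches this
 * function via the ndo_set_vf_vlan hook, e.g.
 *
 *   ip link set <pf-ifname> vf 0 vlan 100 qos 4
 *
 * and "vlan 0 qos 0" removes the port VLAN again.
 */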
| 1500 | |
Rostislav Pehlivanov | c04f90e | 2016-01-27 18:33:30 +0000 | [diff] [blame] | 1501 | int ixgbe_link_mbps(struct ixgbe_adapter *adapter) |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1502 | { |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1503 | switch (adapter->link_speed) { |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1504 | case IXGBE_LINK_SPEED_100_FULL: |
| 1505 | return 100; |
| 1506 | case IXGBE_LINK_SPEED_1GB_FULL: |
| 1507 | return 1000; |
| 1508 | case IXGBE_LINK_SPEED_10GB_FULL: |
| 1509 | return 10000; |
| 1510 | default: |
| 1511 | return 0; |
| 1512 | } |
| 1513 | } |
| 1514 | |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1515 | static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf) |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1516 | { |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1517 | struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; |
| 1518 | struct ixgbe_hw *hw = &adapter->hw; |
| 1519 | u32 bcnrc_val = 0; |
| 1520 | u16 queue, queues_per_pool; |
| 1521 | u16 tx_rate = adapter->vfinfo[vf].tx_rate; |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1522 | |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1523 | if (tx_rate) { |
| 1524 | /* start with base link speed value */ |
| 1525 | bcnrc_val = adapter->vf_rate_link_speed; |
| 1526 | |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1527 | /* Calculate the rate factor values to set */ |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1528 | bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; |
| 1529 | bcnrc_val /= tx_rate; |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1530 | |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1531 | /* clear everything but the rate factor */ |
| 1532 | bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | |
| 1533 | IXGBE_RTTBCNRC_RF_DEC_MASK; |
| 1534 | |
| 1535 | /* enable the rate scheduler */ |
| 1536 | bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1537 | } |
| 1538 | |
Lior Levy | 7555e83 | 2011-06-25 00:09:08 -0700 | [diff] [blame] | 1539 | /* |
| 1540 | * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM |
| 1541 | * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported |
| 1542 | * and 0x004 otherwise. |
| 1543 | */ |
| 1544 | switch (hw->mac.type) { |
| 1545 | case ixgbe_mac_82599EB: |
| 1546 | IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4); |
| 1547 | break; |
| 1548 | case ixgbe_mac_X540: |
| 1549 | IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14); |
| 1550 | break; |
| 1551 | default: |
| 1552 | break; |
| 1553 | } |
| 1554 | |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1555 | /* determine how many queues per pool based on VMDq mask */ |
| 1556 | queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); |
| 1557 | |
| 1558 | /* write value for all Tx queues belonging to VF */ |
| 1559 | for (queue = 0; queue < queues_per_pool; queue++) { |
| 1560 | unsigned int reg_idx = (vf * queues_per_pool) + queue; |
| 1561 | |
| 1562 | IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx); |
| 1563 | IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); |
| 1564 | } |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1565 | } |
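/*
 * Worked example (sketch, assuming IXGBE_RTTBCNRC_RF_INT_SHIFT is 14,
 * i.e. the rate factor is link_speed / tx_rate in 10.14 fixed point):
 * capping a VF at 1000 Mbps on a 10GbE link gives
 *
 *   bcnrc_val = (10000 << 14) / 1000 = 163840 = 10.0 in 10.14 format
 *
 * so the scheduler admits one tenth of the link rate for that pool.
 */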
| 1566 | |
| 1567 | void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) |
| 1568 | { |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1569 | int i; |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1570 | |
| 1571 | /* VF Tx rate limit was not set */ |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1572 | if (!adapter->vf_rate_link_speed) |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1573 | return; |
| 1574 | |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1575 | if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) { |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1576 | adapter->vf_rate_link_speed = 0; |
| 1577 | dev_info(&adapter->pdev->dev, |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1578 | "Link speed has been changed. VF Transmit rate is disabled\n"); |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1579 | } |
| 1580 | |
| 1581 | for (i = 0; i < adapter->num_vfs; i++) { |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1582 | if (!adapter->vf_rate_link_speed) |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1583 | adapter->vfinfo[i].tx_rate = 0; |
| 1584 | |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1585 | ixgbe_set_vf_rate_limit(adapter, i); |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1586 | } |
| 1587 | } |
| 1588 | |
Sucheta Chakraborty | ed61668 | 2014-05-22 09:59:05 -0400 | [diff] [blame] | 1589 | int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, |
| 1590 | int max_tx_rate) |
Greg Rose | 7f01648 | 2010-05-04 22:12:06 +0000 | [diff] [blame] | 1591 | { |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1592 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1593 | int link_speed; |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1594 | |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1595 | /* verify VF is active */ |
| 1596 | if (vf >= adapter->num_vfs) |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1597 | return -EINVAL; |
| 1598 | |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1599 | /* verify link is up */ |
| 1600 | if (!adapter->link_up) |
| 1601 | return -EINVAL; |
| 1602 | |
| 1603 | /* verify we are linked at 10Gbps */ |
| 1604 | link_speed = ixgbe_link_mbps(adapter); |
| 1605 | if (link_speed != 10000) |
| 1606 | return -EINVAL; |
| 1607 | |
Sucheta Chakraborty | ed61668 | 2014-05-22 09:59:05 -0400 | [diff] [blame] | 1608 | if (min_tx_rate) |
| 1609 | return -EINVAL; |
| 1610 | |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1611 | /* rate limit cannot be less than 10 Mbps or greater than link speed */ |
Sucheta Chakraborty | ed61668 | 2014-05-22 09:59:05 -0400 | [diff] [blame] | 1612 | if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed))) |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1613 | return -EINVAL; |
| 1614 | |
| 1615 | /* store values */ |
| 1616 | adapter->vf_rate_link_speed = link_speed; |
Sucheta Chakraborty | ed61668 | 2014-05-22 09:59:05 -0400 | [diff] [blame] | 1617 | adapter->vfinfo[vf].tx_rate = max_tx_rate; |
Alexander Duyck | 9f66d3e | 2012-07-20 08:09:06 +0000 | [diff] [blame] | 1618 | |
| 1619 | /* update hardware configuration */ |
| 1620 | ixgbe_set_vf_rate_limit(adapter, vf); |
Lior Levy | ff4ab20 | 2011-03-11 02:03:07 +0000 | [diff] [blame] | 1621 | |
| 1622 | return 0; |
Greg Rose | 7f01648 | 2010-05-04 22:12:06 +0000 | [diff] [blame] | 1623 | } |
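/*
 * Usage example (host side, for illustration): this function is wired
 * up as the ndo_set_vf_rate hook, so the cap above is set with e.g.
 *
 *   ip link set <pf-ifname> vf 0 max_tx_rate 1000
 *
 * Note that min_tx_rate is rejected above; ixgbe only supports a cap.
 */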
| 1624 | |
Greg Rose | de4c7f6 | 2011-09-29 05:57:33 +0000 | [diff] [blame] | 1625 | int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) |
| 1626 | { |
| 1627 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
Greg Rose | de4c7f6 | 2011-09-29 05:57:33 +0000 | [diff] [blame] | 1628 | struct ixgbe_hw *hw = &adapter->hw; |
Greg Rose | de4c7f6 | 2011-09-29 05:57:33 +0000 | [diff] [blame] | 1629 | |
Emil Tantilov | 600a507 | 2014-10-16 15:49:02 +0000 | [diff] [blame] | 1630 | if (vf >= adapter->num_vfs) |
| 1631 | return -EINVAL; |
| 1632 | |
Greg Rose | de4c7f6 | 2011-09-29 05:57:33 +0000 | [diff] [blame] | 1633 | adapter->vfinfo[vf].spoofchk_enabled = setting; |
| 1634 | |
Emil Tantilov | 77f192a | 2016-03-18 16:11:14 -0700 | [diff] [blame] | 1635 | /* configure MAC spoofing */ |
| 1636 | hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf); |
Greg Rose | de4c7f6 | 2011-09-29 05:57:33 +0000 | [diff] [blame] | 1637 | |
Emil Tantilov | 77f192a | 2016-03-18 16:11:14 -0700 | [diff] [blame] | 1638 | /* configure VLAN spoofing */ |
Emil Tantilov | d3dec7c | 2016-03-18 16:11:19 -0700 | [diff] [blame] | 1639 | hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf); |
Emil Tantilov | 77f192a | 2016-03-18 16:11:14 -0700 | [diff] [blame] | 1640 | |
| 1641 | /* Ensure the LLDP and FC EtherType filters are set up for EtherType |
| 1642 | * anti-spoofing before calling set_ethertype_anti_spoofing for this VF |
| 1643 | */ |
| 1644 | if (hw->mac.ops.set_ethertype_anti_spoofing) { |
| 1645 | IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), |
| 1646 | (IXGBE_ETQF_FILTER_EN | |
| 1647 | IXGBE_ETQF_TX_ANTISPOOF | |
| 1648 | IXGBE_ETH_P_LLDP)); |
| 1649 | |
| 1650 | IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), |
| 1651 | (IXGBE_ETQF_FILTER_EN | |
| 1652 | IXGBE_ETQF_TX_ANTISPOOF | |
| 1653 | ETH_P_PAUSE)); |
| 1654 | |
| 1655 | hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf); |
Greg Rose | de4c7f6 | 2011-09-29 05:57:33 +0000 | [diff] [blame] | 1656 | } |
| 1657 | |
| 1658 | return 0; |
| 1659 | } |
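/*
 * Usage example (host side, for illustration): anti-spoof checking is
 * toggled per VF through iproute2 via the ndo_set_vf_spoofchk hook,
 * e.g.
 *
 *   ip link set <pf-ifname> vf 0 spoofchk off
 */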
| 1660 | |
Vlad Zolotarov | e65ce0d | 2015-03-30 21:35:24 +0300 | [diff] [blame] | 1661 | int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, |
| 1662 | bool setting) |
| 1663 | { |
| 1664 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
| 1665 | |
| 1666 | /* This operation is currently supported only for 82599 and x540 |
| 1667 | * devices. |
| 1668 | */ |
| 1669 | if (adapter->hw.mac.type < ixgbe_mac_82599EB || |
| 1670 | adapter->hw.mac.type >= ixgbe_mac_X550) |
| 1671 | return -EOPNOTSUPP; |
| 1672 | |
| 1673 | if (vf >= adapter->num_vfs) |
| 1674 | return -EINVAL; |
| 1675 | |
| 1676 | adapter->vfinfo[vf].rss_query_enabled = setting; |
| 1677 | |
| 1678 | return 0; |
| 1679 | } |
| 1680 | |
Hiroshi Shimamoto | 54011e4 | 2015-08-28 06:58:33 +0000 | [diff] [blame] | 1681 | int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) |
| 1682 | { |
| 1683 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
| 1684 | |
| 1685 | if (vf >= adapter->num_vfs) |
| 1686 | return -EINVAL; |
| 1687 | |
| 1688 | /* nothing to do */ |
| 1689 | if (adapter->vfinfo[vf].trusted == setting) |
| 1690 | return 0; |
| 1691 | |
| 1692 | adapter->vfinfo[vf].trusted = setting; |
| 1693 | |
| 1694 | /* reset VF to reconfigure features */ |
| 1695 | adapter->vfinfo[vf].clear_to_send = false; |
| 1696 | ixgbe_ping_vf(adapter, vf); |
| 1697 | |
| 1698 | e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not "); |
| 1699 | |
| 1700 | return 0; |
| 1701 | } |
| 1702 | |
Greg Rose | 7f01648 | 2010-05-04 22:12:06 +0000 | [diff] [blame] | 1703 | int ixgbe_ndo_get_vf_config(struct net_device *netdev, |
| 1704 | int vf, struct ifla_vf_info *ivi) |
| 1705 | { |
| 1706 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
| 1707 | if (vf >= adapter->num_vfs) |
| 1708 | return -EINVAL; |
| 1709 | ivi->vf = vf; |
| 1710 | memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); |
Sucheta Chakraborty | ed61668 | 2014-05-22 09:59:05 -0400 | [diff] [blame] | 1711 | ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate; |
| 1712 | ivi->min_tx_rate = 0; |
Greg Rose | 7f01648 | 2010-05-04 22:12:06 +0000 | [diff] [blame] | 1713 | ivi->vlan = adapter->vfinfo[vf].pf_vlan; |
| 1714 | ivi->qos = adapter->vfinfo[vf].pf_qos; |
Greg Rose | de4c7f6 | 2011-09-29 05:57:33 +0000 | [diff] [blame] | 1715 | ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; |
Vlad Zolotarov | e65ce0d | 2015-03-30 21:35:24 +0300 | [diff] [blame] | 1716 | ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; |
Hiroshi Shimamoto | 54011e4 | 2015-08-28 06:58:33 +0000 | [diff] [blame] | 1717 | ivi->trusted = adapter->vfinfo[vf].trusted; |
Greg Rose | 7f01648 | 2010-05-04 22:12:06 +0000 | [diff] [blame] | 1718 | return 0; |
| 1719 | } |