/* Intel(R) Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include "fm10k.h"
#include "fm10k_vf.h"
#include "fm10k_pf.h"

static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results,
			       struct fm10k_mbx_info *mbx)
{
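	/* The mailbox handler is passed the mbx pointer; recovering the
	 * enclosing VF info with this cast relies on mbx being the first
	 * member of struct fm10k_vf_info.
	 */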
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;
	struct pci_dev *pdev = interface->pdev;

	dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n",
		**results & FM10K_TLV_ID_MASK, vf_info->vf_idx);

	return fm10k_tlv_msg_error(hw, results, mbx);
}

static const struct fm10k_msg_data iov_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};

s32 fm10k_iov_event(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	s64 vflre;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
		goto read_unlock;

	/* read VFLRE to determine if any VFs have been reset */
	do {
		vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(0));
		vflre <<= 32;
		vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(1));
		vflre = (vflre << 32) | (vflre >> 32);
		vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));

		i = iov_data->num_vfs;

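		/* Scan the VFLR bitmap from the highest VF index downward:
		 * the initial shift puts bit (num_vfs - 1) in the sign bit,
		 * and "vflre += vflre" shifts left by one each iteration,
		 * so a negative value means the current VF's VFLR bit is
		 * set.
		 */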
		for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
			struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

			if (vflre >= 0)
				continue;

			hw->iov.ops.reset_resources(hw, vf_info);
			vf_info->mbx.ops.connect(hw, &vf_info->mbx);
		}
	} while (i != iov_data->num_vfs);

read_unlock:
	rcu_read_unlock();

	return 0;
}

s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	/* Most VF messages sent to the PF cause the PF to respond by
	 * requesting from the SM mailbox. This means that too many VF
	 * messages processed at once could cause a mailbox timeout on the PF.
	 * To prevent this, store a pointer to the next VF mbx to process. Use
	 * that as the start of the loop so that we don't starve whichever VF
	 * got ignored on the previous run.
	 */
process_mbx:
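	/* Walk the VF mailboxes in descending index order; the GNU "x ? : y"
	 * shorthand starts from next_vf_mbx when the previous pass stopped
	 * early, and from num_vfs otherwise.
	 */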
	for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		struct fm10k_mbx_info *mbx = &vf_info->mbx;
		u16 glort = vf_info->glort;

		/* verify port mapping is valid, if not reset port */
		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort))
			hw->iov.ops.reset_lport(hw, vf_info);

		/* reset any VF whose mailbox has timed out */
		if (!mbx->timeout) {
			hw->iov.ops.reset_resources(hw, vf_info);
			mbx->ops.connect(hw, mbx);
		}

		/* guarantee we have free space in the SM mailbox */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
			/* keep track of how many times this occurs */
			interface->hw_sm_mbx_full++;
			break;
		}

		/* cleanup mailbox and process received messages */
		mbx->ops.process(hw, mbx);
	}

	/* if we stopped processing mailboxes early, update next_vf_mbx.
	 * Otherwise, reset next_vf_mbx, and restart loop so that we process
	 * the remaining mailboxes we skipped at the start.
	 */
	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}

	/* free the lock */
	fm10k_mbx_unlock(interface);

read_unlock:
	rcu_read_unlock();

	return 0;
}

void fm10k_iov_suspend(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* shut down queue mapping for VFs */
	fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss),
			FM10K_DGLORTMAP_NONE);

	/* Stop any active VFs and reset their resources */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		hw->iov.ops.reset_resources(hw, vf_info);
		hw->iov.ops.reset_lport(hw, vf_info);
	}
}

int fm10k_iov_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* return error if iov_data is not already populated */
	if (!iov_data)
		return -ENOMEM;

	/* allocate hardware resources for the VFs */
	hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);

	/* configure DGLORT mapping for RSS */
	dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	dglort.idx = fm10k_dglort_vf_rss;
	dglort.inner_rss = 1;
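	/* fls(n - 1) is the number of bits needed to encode the indices
	 * 0..(n - 1), which sizes the queue (rss_l) and VSI (vsi_l) fields
	 * of the DGLORT.
	 */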
	dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
	dglort.queue_b = fm10k_vf_queue_index(hw, 0);
	dglort.vsi_l = fls(hw->iov.total_vfs - 1);
	dglort.vsi_b = 1;

	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign resources to the device */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* allocate all but the last GLORT to the VFs */
		if (i == ((~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT))
			break;

		/* assign GLORT to VF, and restrict it to multicast */
		hw->iov.ops.set_lport(hw, vf_info, i,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* mailbox is disconnected so we don't send a message */
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

		/* now we are ready so we can connect */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

	return 0;
}

s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
{
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;
	u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE;

	/* no IOV support, not our message to process */
	if (!iov_data)
		return FM10K_ERR_PARAM;

	/* glort outside our range, not our message to process */
	if (vf_idx >= iov_data->num_vfs)
		return FM10K_ERR_PARAM;

	/* determine if an update has occurred and if so notify the VF */
	vf_info = &iov_data->vf_info[vf_idx];
	if (vf_info->sw_vid != pvid) {
		vf_info->sw_vid = pvid;
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
	}

	return 0;
}

static void fm10k_iov_free_data(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);

	if (!interface->iov_data)
		return;

	/* reclaim hardware resources */
	fm10k_iov_suspend(pdev);

	/* drop iov_data from interface */
	kfree_rcu(interface->iov_data, rcu);
	interface->iov_data = NULL;
}

static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	size_t size;
	int i, err;

	/* return error if iov_data is already populated */
	if (iov_data)
		return -EBUSY;

	/* The PF should always be able to assign resources */
	if (!hw->iov.ops.assign_resources)
		return -ENODEV;

	/* nothing to do if no VFs are requested */
	if (!num_vfs)
		return 0;

	/* allocate memory for VF storage */
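	/* (offsetof() over vf_info[num_vfs] sizes the structure together
	 * with num_vfs entries of its trailing vf_info[] array)
	 */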
	size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]);
	iov_data = kzalloc(size, GFP_KERNEL);
	if (!iov_data)
		return -ENOMEM;

	/* record number of VFs */
	iov_data->num_vfs = num_vfs;

	/* loop through vf_info structures initializing each entry */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* Record VF VSI value */
		vf_info->vsi = i + 1;
		vf_info->vf_idx = i;

		/* initialize mailbox memory */
		err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to initialize SR-IOV mailbox\n");
			kfree(iov_data);
			return err;
		}
	}

	/* assign iov_data to interface */
	interface->iov_data = iov_data;

	/* allocate hardware resources for the VFs */
	fm10k_iov_resume(pdev);

	return 0;
}

void fm10k_iov_disable(struct pci_dev *pdev)
{
	if (pci_num_vf(pdev) && pci_vfs_assigned(pdev))
		dev_err(&pdev->dev,
			"Cannot disable SR-IOV while VFs are assigned\n");
	else
		pci_disable_sriov(pdev);

	fm10k_iov_free_data(pdev);
}

static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev)
{
	u32 err_sev;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

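	/* clear the Completer Abort bit in the AER Uncorrectable Error
	 * Severity register, demoting such errors from fatal to non-fatal
	 */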
	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev);
	err_sev &= ~PCI_ERR_UNC_COMP_ABORT;
	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev);
}

int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	int current_vfs = pci_num_vf(pdev);
	int err = 0;

	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
	} else {
		pci_disable_sriov(pdev);
		fm10k_iov_free_data(pdev);
	}

	/* allocate resources for the VFs */
	err = fm10k_iov_alloc_data(pdev, num_vfs);
	if (err)
		return err;

	/* allocate VFs if not already allocated */
	if (num_vfs && (num_vfs != current_vfs)) {
		/* Disable completer abort error reporting as
		 * the VFs can trigger this any time they read a queue
		 * that they don't own.
		 */
		fm10k_disable_aer_comp_abort(pdev);

		err = pci_enable_sriov(pdev, num_vfs);
		if (err) {
			dev_err(&pdev->dev,
				"Enable PCI SR-IOV failed: %d\n", err);
			return err;
		}
	}

	return num_vfs;
}

static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
				       struct fm10k_vf_info *vf_info)
{
	struct fm10k_hw *hw = &interface->hw;

	/* assigning the MAC address will send a mailbox message */
	fm10k_mbx_lock(interface);

	/* disable LPORT for this VF which clears switch rules */
	hw->iov.ops.reset_lport(hw, vf_info);

	/* assign new MAC+VLAN for this VF */
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	/* re-enable the LPORT for this VF */
	hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx,
			      FM10K_VF_FLAG_MULTI_CAPABLE);

	fm10k_mbx_unlock(interface);
}

int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* verify MAC addr is valid */
	if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
		return -EINVAL;

	/* record new MAC address */
	vf_info = &iov_data->vf_info[vf_idx];
	ether_addr_copy(vf_info->mac, mac);

	fm10k_reset_vf_info(interface, vf_info);

	return 0;
}

int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
			  u8 qos, __be16 vlan_proto)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* QoS is unsupported; accepted VLAN IDs range from 0 to 4094 */
	if (qos || (vid > (VLAN_VID_MASK - 1)))
		return -EINVAL;

	/* only the default VLAN protocol (802.1Q) is supported */
	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	vf_info = &iov_data->vf_info[vf_idx];

	/* exit if there is nothing to do */
	if (vf_info->pf_vid == vid)
		return 0;

	/* record default VLAN ID for VF */
	vf_info->pf_vid = vid;

	/* Clear the VLAN table for the VF */
	hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);

	fm10k_reset_vf_info(interface, vf_info);

	return 0;
}

int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
			int __always_unused unused, int rate)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* rate limit cannot be less than 10 Mb/s or greater than link speed */
	if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX))
		return -EINVAL;

	/* store values */
	iov_data->vf_info[vf_idx].rate = rate;

	/* update hardware configuration */
	hw->iov.ops.configure_tc(hw, vf_idx, rate);

	return 0;
}

int fm10k_ndo_get_vf_config(struct net_device *netdev,
			    int vf_idx, struct ifla_vf_info *ivi)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	vf_info = &iov_data->vf_info[vf_idx];

	ivi->vf = vf_idx;
	ivi->max_tx_rate = vf_info->rate;
	ivi->min_tx_rate = 0;
	ether_addr_copy(ivi->mac, vf_info->mac);
	ivi->vlan = vf_info->pf_vid;
	ivi->qos = 0;

	return 0;
}