Jacob Keller | 8664109 | 2016-04-07 08:21:21 -0700 | [diff] [blame] | 1 | /* Intel(R) Ethernet Switch Host Interface Driver |
| 2 | * Copyright(c) 2013 - 2016 Intel Corporation. |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify it |
| 5 | * under the terms and conditions of the GNU General Public License, |
| 6 | * version 2, as published by the Free Software Foundation. |
| 7 | * |
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT |
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 11 | * more details. |
| 12 | * |
| 13 | * The full GNU General Public License is included in this distribution in |
| 14 | * the file called "COPYING". |
| 15 | * |
| 16 | * Contact Information: |
| 17 | * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> |
| 18 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
| 19 | */ |
| 20 | |
| 21 | #include "fm10k.h" |
| 22 | #include "fm10k_vf.h" |
| 23 | #include "fm10k_pf.h" |
| 24 | |
| 25 | static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results, |
| 26 | struct fm10k_mbx_info *mbx) |
| 27 | { |
| 28 | struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; |
| 29 | struct fm10k_intfc *interface = hw->back; |
| 30 | struct pci_dev *pdev = interface->pdev; |
| 31 | |
| 32 | dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n", |
| 33 | **results & FM10K_TLV_ID_MASK, vf_info->vf_idx); |
| 34 | |
| 35 | return fm10k_tlv_msg_error(hw, results, mbx); |
| 36 | } |
| 37 | |
/* Dispatch table mapping mailbox message IDs received from a VF to their
 * PF-side handlers; IDs with no entry fall through to the error handler.
 */
static const struct fm10k_msg_data iov_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};
| 45 | |
| 46 | s32 fm10k_iov_event(struct fm10k_intfc *interface) |
| 47 | { |
| 48 | struct fm10k_hw *hw = &interface->hw; |
| 49 | struct fm10k_iov_data *iov_data; |
Jeff Kirsher | 9de15bd | 2015-04-10 17:20:17 -0700 | [diff] [blame] | 50 | s64 vflre; |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 51 | int i; |
| 52 | |
Jacob Keller | d8ec92f | 2016-02-10 14:45:51 -0800 | [diff] [blame] | 53 | /* if there is no iov_data then there is no mailbox to process */ |
Jacob Keller | ce4dad2 | 2016-06-17 16:21:11 -0700 | [diff] [blame] | 54 | if (!READ_ONCE(interface->iov_data)) |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 55 | return 0; |
| 56 | |
| 57 | rcu_read_lock(); |
| 58 | |
| 59 | iov_data = interface->iov_data; |
| 60 | |
| 61 | /* check again now that we are in the RCU block */ |
| 62 | if (!iov_data) |
| 63 | goto read_unlock; |
| 64 | |
| 65 | if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR)) |
Jeff Kirsher | 9de15bd | 2015-04-10 17:20:17 -0700 | [diff] [blame] | 66 | goto read_unlock; |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 67 | |
| 68 | /* read VFLRE to determine if any VFs have been reset */ |
| 69 | do { |
| 70 | vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(0)); |
| 71 | vflre <<= 32; |
| 72 | vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(1)); |
| 73 | vflre = (vflre << 32) | (vflre >> 32); |
| 74 | vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0)); |
| 75 | |
| 76 | i = iov_data->num_vfs; |
| 77 | |
| 78 | for (vflre <<= 64 - i; vflre && i--; vflre += vflre) { |
| 79 | struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; |
| 80 | |
| 81 | if (vflre >= 0) |
| 82 | continue; |
| 83 | |
| 84 | hw->iov.ops.reset_resources(hw, vf_info); |
| 85 | vf_info->mbx.ops.connect(hw, &vf_info->mbx); |
| 86 | } |
| 87 | } while (i != iov_data->num_vfs); |
| 88 | |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 89 | read_unlock: |
| 90 | rcu_read_unlock(); |
| 91 | |
| 92 | return 0; |
| 93 | } |
| 94 | |
/**
 * fm10k_iov_mbx - Process pending VF mailbox messages
 * @interface: board private structure
 *
 * Walk the per-VF mailboxes under the mailbox lock, draining the switch
 * manager (SM) mailbox between VFs and bailing out early when the SM
 * mailbox has no room for further replies.  Iteration starts at
 * next_vf_mbx so a VF skipped on the previous run is serviced first.
 *
 * Returns 0 in all cases; per-VF mailbox failures are handled by
 * resetting the offending VF.
 */
s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	/* Most VF messages sent to the PF cause the PF to respond by
	 * requesting from the SM mailbox. This means that too many VF
	 * messages processed at once could cause a mailbox timeout on the PF.
	 * To prevent this, store a pointer to the next VF mbx to process. Use
	 * that as the start of the loop so that we don't starve whichever VF
	 * got ignored on the previous run.
	 */
process_mbx:
	/* GNU "?:": resume from next_vf_mbx when nonzero, else num_vfs */
	for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		struct fm10k_mbx_info *mbx = &vf_info->mbx;
		u16 glort = vf_info->glort;

		/* process the SM mailbox first to drain outgoing messages */
		hw->mbx.ops.process(hw, &hw->mbx);

		/* verify port mapping is valid, if not reset port */
		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort))
			hw->iov.ops.reset_lport(hw, vf_info);

		/* reset VFs that have mailbox timed out */
		if (!mbx->timeout) {
			hw->iov.ops.reset_resources(hw, vf_info);
			mbx->ops.connect(hw, mbx);
		}

		/* guarantee we have free space in the SM mailbox */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
			/* keep track of how many times this occurs */
			interface->hw_sm_mbx_full++;

			/* make sure we try again momentarily */
			fm10k_service_event_schedule(interface);

			break;
		}

		/* cleanup mailbox and process received messages */
		mbx->ops.process(hw, mbx);
	}

	/* if we stopped processing mailboxes early, update next_vf_mbx.
	 * Otherwise, reset next_vf_mbx, and restart loop so that we process
	 * the remaining mailboxes we skipped at the start.
	 */
	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}

	/* free the lock */
	fm10k_mbx_unlock(interface);

read_unlock:
	rcu_read_unlock();

	return 0;
}
| 176 | |
| 177 | void fm10k_iov_suspend(struct pci_dev *pdev) |
| 178 | { |
| 179 | struct fm10k_intfc *interface = pci_get_drvdata(pdev); |
| 180 | struct fm10k_iov_data *iov_data = interface->iov_data; |
| 181 | struct fm10k_hw *hw = &interface->hw; |
| 182 | int num_vfs, i; |
| 183 | |
| 184 | /* pull out num_vfs from iov_data */ |
| 185 | num_vfs = iov_data ? iov_data->num_vfs : 0; |
| 186 | |
| 187 | /* shut down queue mapping for VFs */ |
| 188 | fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss), |
| 189 | FM10K_DGLORTMAP_NONE); |
| 190 | |
| 191 | /* Stop any active VFs and reset their resources */ |
| 192 | for (i = 0; i < num_vfs; i++) { |
| 193 | struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; |
| 194 | |
| 195 | hw->iov.ops.reset_resources(hw, vf_info); |
| 196 | hw->iov.ops.reset_lport(hw, vf_info); |
| 197 | } |
| 198 | } |
| 199 | |
/**
 * fm10k_iov_resume - Restore hardware state for all configured VFs
 * @pdev: PCI device for the PF
 *
 * Reassign queue resources to the VFs, reprogram the VF RSS DGLORT
 * mapping, and bring each VF's logical port and mailbox back up.
 * Counterpart of fm10k_iov_suspend().
 *
 * Returns 0 on success, -ENOMEM when iov_data has not been allocated.
 */
int fm10k_iov_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* return error if iov_data is not already populated */
	if (!iov_data)
		return -ENOMEM;

	/* allocate hardware resources for the VFs */
	hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);

	/* configure DGLORT mapping for RSS */
	/* NOTE(review): FM10K_DGLORTMAP_NONE appears to double as the glort
	 * field mask here — confirm against the register layout
	 */
	dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	dglort.idx = fm10k_dglort_vf_rss;
	dglort.inner_rss = 1;
	/* log2 sizing of the RSS queue and VSI fields */
	dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
	dglort.queue_b = fm10k_vf_queue_index(hw, 0);
	dglort.vsi_l = fls(hw->iov.total_vfs - 1);
	dglort.vsi_b = 1;

	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign resources to the device */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* allocate all but the last GLORT to the VFs */
		if (i == ((~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT))
			break;

		/* assign GLORT to VF, and restrict it to multicast */
		hw->iov.ops.set_lport(hw, vf_info, i,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* mailbox is disconnected so we don't send a message */
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

		/* now we are ready so we can connect */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

	return 0;
}
| 250 | |
| 251 | s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid) |
| 252 | { |
| 253 | struct fm10k_iov_data *iov_data = interface->iov_data; |
| 254 | struct fm10k_hw *hw = &interface->hw; |
| 255 | struct fm10k_vf_info *vf_info; |
| 256 | u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE; |
| 257 | |
| 258 | /* no IOV support, not our message to process */ |
| 259 | if (!iov_data) |
| 260 | return FM10K_ERR_PARAM; |
| 261 | |
| 262 | /* glort outside our range, not our message to process */ |
| 263 | if (vf_idx >= iov_data->num_vfs) |
| 264 | return FM10K_ERR_PARAM; |
| 265 | |
Matthew Vick | eca3204 | 2015-01-31 02:23:05 +0000 | [diff] [blame] | 266 | /* determine if an update has occurred and if so notify the VF */ |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 267 | vf_info = &iov_data->vf_info[vf_idx]; |
| 268 | if (vf_info->sw_vid != pvid) { |
| 269 | vf_info->sw_vid = pvid; |
| 270 | hw->iov.ops.assign_default_mac_vlan(hw, vf_info); |
| 271 | } |
| 272 | |
| 273 | return 0; |
| 274 | } |
| 275 | |
| 276 | static void fm10k_iov_free_data(struct pci_dev *pdev) |
| 277 | { |
| 278 | struct fm10k_intfc *interface = pci_get_drvdata(pdev); |
| 279 | |
| 280 | if (!interface->iov_data) |
| 281 | return; |
| 282 | |
| 283 | /* reclaim hardware resources */ |
| 284 | fm10k_iov_suspend(pdev); |
| 285 | |
| 286 | /* drop iov_data from interface */ |
| 287 | kfree_rcu(interface->iov_data, rcu); |
| 288 | interface->iov_data = NULL; |
| 289 | } |
| 290 | |
| 291 | static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs) |
| 292 | { |
| 293 | struct fm10k_intfc *interface = pci_get_drvdata(pdev); |
| 294 | struct fm10k_iov_data *iov_data = interface->iov_data; |
| 295 | struct fm10k_hw *hw = &interface->hw; |
| 296 | size_t size; |
| 297 | int i, err; |
| 298 | |
| 299 | /* return error if iov_data is already populated */ |
| 300 | if (iov_data) |
| 301 | return -EBUSY; |
| 302 | |
| 303 | /* The PF should always be able to assign resources */ |
| 304 | if (!hw->iov.ops.assign_resources) |
| 305 | return -ENODEV; |
| 306 | |
| 307 | /* nothing to do if no VFs are requested */ |
| 308 | if (!num_vfs) |
| 309 | return 0; |
| 310 | |
| 311 | /* allocate memory for VF storage */ |
| 312 | size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]); |
| 313 | iov_data = kzalloc(size, GFP_KERNEL); |
| 314 | if (!iov_data) |
| 315 | return -ENOMEM; |
| 316 | |
| 317 | /* record number of VFs */ |
| 318 | iov_data->num_vfs = num_vfs; |
| 319 | |
| 320 | /* loop through vf_info structures initializing each entry */ |
| 321 | for (i = 0; i < num_vfs; i++) { |
| 322 | struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; |
| 323 | |
| 324 | /* Record VF VSI value */ |
| 325 | vf_info->vsi = i + 1; |
| 326 | vf_info->vf_idx = i; |
| 327 | |
| 328 | /* initialize mailbox memory */ |
| 329 | err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i); |
| 330 | if (err) { |
| 331 | dev_err(&pdev->dev, |
| 332 | "Unable to initialize SR-IOV mailbox\n"); |
| 333 | kfree(iov_data); |
| 334 | return err; |
| 335 | } |
| 336 | } |
| 337 | |
| 338 | /* assign iov_data to interface */ |
| 339 | interface->iov_data = iov_data; |
| 340 | |
| 341 | /* allocate hardware resources for the VFs */ |
| 342 | fm10k_iov_resume(pdev); |
| 343 | |
| 344 | return 0; |
| 345 | } |
| 346 | |
| 347 | void fm10k_iov_disable(struct pci_dev *pdev) |
| 348 | { |
| 349 | if (pci_num_vf(pdev) && pci_vfs_assigned(pdev)) |
| 350 | dev_err(&pdev->dev, |
| 351 | "Cannot disable SR-IOV while VFs are assigned\n"); |
| 352 | else |
| 353 | pci_disable_sriov(pdev); |
| 354 | |
| 355 | fm10k_iov_free_data(pdev); |
| 356 | } |
| 357 | |
| 358 | static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev) |
| 359 | { |
| 360 | u32 err_sev; |
| 361 | int pos; |
| 362 | |
| 363 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); |
| 364 | if (!pos) |
| 365 | return; |
| 366 | |
| 367 | pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev); |
| 368 | err_sev &= ~PCI_ERR_UNC_COMP_ABORT; |
| 369 | pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev); |
| 370 | } |
| 371 | |
/**
 * fm10k_iov_configure - Enable, resize, or disable SR-IOV
 * @pdev: PCI device for the PF
 * @num_vfs: number of VFs requested (0 disables SR-IOV)
 *
 * sriov_configure callback.  Tears down the current configuration (unless
 * VFs are still assigned to guests, in which case the current VF count is
 * kept), reallocates driver state, and enables the requested number of
 * VFs in PCI.
 *
 * Returns the resulting VF count on success or a negative error code.
 */
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	int current_vfs = pci_num_vf(pdev);
	int err = 0;

	if (current_vfs && pci_vfs_assigned(pdev)) {
		/* cannot touch assigned VFs; keep the existing count */
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
	} else {
		pci_disable_sriov(pdev);
		fm10k_iov_free_data(pdev);
	}

	/* allocate resources for the VFs */
	err = fm10k_iov_alloc_data(pdev, num_vfs);
	if (err)
		return err;

	/* allocate VFs if not already allocated */
	if (num_vfs && (num_vfs != current_vfs)) {
		/* Disable completer abort error reporting as
		 * the VFs can trigger this any time they read a queue
		 * that they don't own.
		 */
		fm10k_disable_aer_comp_abort(pdev);

		err = pci_enable_sriov(pdev, num_vfs);
		if (err) {
			dev_err(&pdev->dev,
				"Enable PCI SR-IOV failed: %d\n", err);
			return err;
		}
	}

	return num_vfs;
}
| 409 | |
/**
 * fm10k_reset_vf_info - Apply updated MAC/VLAN settings to a VF
 * @interface: board private structure
 * @vf_info: VF whose logical port should be recycled
 *
 * Drop and re-create the VF's logical port so that newly recorded
 * administrative MAC/VLAN settings take effect.  The whole sequence runs
 * under the mailbox lock because assigning the MAC address sends a
 * mailbox message.
 */
static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
				       struct fm10k_vf_info *vf_info)
{
	struct fm10k_hw *hw = &interface->hw;

	/* assigning the MAC address will send a mailbox message */
	fm10k_mbx_lock(interface);

	/* disable LPORT for this VF which clears switch rules */
	hw->iov.ops.reset_lport(hw, vf_info);

	/* assign new MAC+VLAN for this VF */
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	/* re-enable the LPORT for this VF */
	hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx,
			      FM10K_VF_FLAG_MULTI_CAPABLE);

	fm10k_mbx_unlock(interface);
}
| 430 | |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 431 | int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac) |
| 432 | { |
| 433 | struct fm10k_intfc *interface = netdev_priv(netdev); |
| 434 | struct fm10k_iov_data *iov_data = interface->iov_data; |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 435 | struct fm10k_vf_info *vf_info; |
| 436 | |
| 437 | /* verify SR-IOV is active and that vf idx is valid */ |
| 438 | if (!iov_data || vf_idx >= iov_data->num_vfs) |
| 439 | return -EINVAL; |
| 440 | |
| 441 | /* verify MAC addr is valid */ |
| 442 | if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac)) |
| 443 | return -EINVAL; |
| 444 | |
| 445 | /* record new MAC address */ |
| 446 | vf_info = &iov_data->vf_info[vf_idx]; |
| 447 | ether_addr_copy(vf_info->mac, mac); |
| 448 | |
Jacob Keller | a38488f | 2015-06-03 16:31:07 -0700 | [diff] [blame] | 449 | fm10k_reset_vf_info(interface, vf_info); |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 450 | |
| 451 | return 0; |
| 452 | } |
| 453 | |
| 454 | int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid, |
Moshe Shemesh | 79aab09 | 2016-09-22 12:11:15 +0300 | [diff] [blame] | 455 | u8 qos, __be16 vlan_proto) |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 456 | { |
| 457 | struct fm10k_intfc *interface = netdev_priv(netdev); |
| 458 | struct fm10k_iov_data *iov_data = interface->iov_data; |
| 459 | struct fm10k_hw *hw = &interface->hw; |
| 460 | struct fm10k_vf_info *vf_info; |
| 461 | |
| 462 | /* verify SR-IOV is active and that vf idx is valid */ |
| 463 | if (!iov_data || vf_idx >= iov_data->num_vfs) |
| 464 | return -EINVAL; |
| 465 | |
| 466 | /* QOS is unsupported and VLAN IDs accepted range 0-4094 */ |
| 467 | if (qos || (vid > (VLAN_VID_MASK - 1))) |
| 468 | return -EINVAL; |
| 469 | |
Moshe Shemesh | 79aab09 | 2016-09-22 12:11:15 +0300 | [diff] [blame] | 470 | /* VF VLAN Protocol part to default is unsupported */ |
| 471 | if (vlan_proto != htons(ETH_P_8021Q)) |
| 472 | return -EPROTONOSUPPORT; |
| 473 | |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 474 | vf_info = &iov_data->vf_info[vf_idx]; |
| 475 | |
| 476 | /* exit if there is nothing to do */ |
| 477 | if (vf_info->pf_vid == vid) |
| 478 | return 0; |
| 479 | |
| 480 | /* record default VLAN ID for VF */ |
| 481 | vf_info->pf_vid = vid; |
| 482 | |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 483 | /* Clear the VLAN table for the VF */ |
| 484 | hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false); |
| 485 | |
Jacob Keller | a38488f | 2015-06-03 16:31:07 -0700 | [diff] [blame] | 486 | fm10k_reset_vf_info(interface, vf_info); |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 487 | |
| 488 | return 0; |
| 489 | } |
| 490 | |
Jeff Kirsher | de44519 | 2015-04-03 13:26:56 -0700 | [diff] [blame] | 491 | int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, |
| 492 | int __always_unused unused, int rate) |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 493 | { |
| 494 | struct fm10k_intfc *interface = netdev_priv(netdev); |
| 495 | struct fm10k_iov_data *iov_data = interface->iov_data; |
| 496 | struct fm10k_hw *hw = &interface->hw; |
| 497 | |
| 498 | /* verify SR-IOV is active and that vf idx is valid */ |
| 499 | if (!iov_data || vf_idx >= iov_data->num_vfs) |
| 500 | return -EINVAL; |
| 501 | |
| 502 | /* rate limit cannot be less than 10Mbs or greater than link speed */ |
| 503 | if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX)) |
| 504 | return -EINVAL; |
| 505 | |
| 506 | /* store values */ |
| 507 | iov_data->vf_info[vf_idx].rate = rate; |
| 508 | |
| 509 | /* update hardware configuration */ |
| 510 | hw->iov.ops.configure_tc(hw, vf_idx, rate); |
| 511 | |
| 512 | return 0; |
| 513 | } |
| 514 | |
| 515 | int fm10k_ndo_get_vf_config(struct net_device *netdev, |
| 516 | int vf_idx, struct ifla_vf_info *ivi) |
| 517 | { |
| 518 | struct fm10k_intfc *interface = netdev_priv(netdev); |
| 519 | struct fm10k_iov_data *iov_data = interface->iov_data; |
| 520 | struct fm10k_vf_info *vf_info; |
| 521 | |
| 522 | /* verify SR-IOV is active and that vf idx is valid */ |
| 523 | if (!iov_data || vf_idx >= iov_data->num_vfs) |
| 524 | return -EINVAL; |
| 525 | |
| 526 | vf_info = &iov_data->vf_info[vf_idx]; |
| 527 | |
| 528 | ivi->vf = vf_idx; |
| 529 | ivi->max_tx_rate = vf_info->rate; |
| 530 | ivi->min_tx_rate = 0; |
| 531 | ether_addr_copy(ivi->mac, vf_info->mac); |
| 532 | ivi->vlan = vf_info->pf_vid; |
| 533 | ivi->qos = 0; |
| 534 | |
| 535 | return 0; |
| 536 | } |