Jacob Keller | 8664109 | 2016-04-07 08:21:21 -0700 | [diff] [blame] | 1 | /* Intel(R) Ethernet Switch Host Interface Driver |
Jacob Keller | 4abf01b | 2017-07-10 13:23:09 -0700 | [diff] [blame^] | 2 | * Copyright(c) 2013 - 2017 Intel Corporation. |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify it |
| 5 | * under the terms and conditions of the GNU General Public License, |
| 6 | * version 2, as published by the Free Software Foundation. |
| 7 | * |
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT |
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 11 | * more details. |
| 12 | * |
| 13 | * The full GNU General Public License is included in this distribution in |
| 14 | * the file called "COPYING". |
| 15 | * |
| 16 | * Contact Information: |
| 17 | * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> |
| 18 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
| 19 | */ |
| 20 | |
| 21 | #include "fm10k.h" |
| 22 | #include "fm10k_vf.h" |
| 23 | #include "fm10k_pf.h" |
| 24 | |
| 25 | static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results, |
| 26 | struct fm10k_mbx_info *mbx) |
| 27 | { |
| 28 | struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; |
| 29 | struct fm10k_intfc *interface = hw->back; |
| 30 | struct pci_dev *pdev = interface->pdev; |
| 31 | |
| 32 | dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n", |
| 33 | **results & FM10K_TLV_ID_MASK, vf_info->vf_idx); |
| 34 | |
| 35 | return fm10k_tlv_msg_error(hw, results, mbx); |
| 36 | } |
| 37 | |
/* Dispatch table for messages arriving from VFs over the PF/VF mailbox;
 * the error handler is the catch-all for unrecognized message IDs.
 */
static const struct fm10k_msg_data iov_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};
| 45 | |
/* Handle a VFLR (VF Level Reset) event: find every VF whose VFLRE bit is
 * set, reclaim its hardware resources, and reopen its mailbox so it can
 * reconnect. Always returns 0.
 */
s32 fm10k_iov_event(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	s64 vflre;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	/* nothing to do unless the VFLR cause bit is pending in EICR */
	if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
		goto read_unlock;

	/* read VFLRE to determine if any VFs have been reset */
	do {
		/* combine the two 32-bit VFLRE registers into one 64-bit
		 * value, high word first
		 */
		vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(1));
		vflre <<= 32;
		vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));

		i = iov_data->num_vfs;

		/* Sign-bit trick: the initial shift puts the highest VF's
		 * VFLRE bit into the sign position; each "vflre += vflre"
		 * (i.e. shift left by one) moves the next lower VF's bit
		 * there. So "vflre >= 0" means the current VF did not see
		 * a VFLR event, and the loop ends early once no set bits
		 * remain (vflre == 0).
		 */
		for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
			struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

			if (vflre >= 0)
				continue;

			/* this VF was reset: reclaim its resources and
			 * reopen its mailbox so it can reconnect
			 */
			hw->iov.ops.reset_resources(hw, vf_info);
			vf_info->mbx.ops.connect(hw, &vf_info->mbx);
		}
	/* repeat until a full pass completes with no events consumed
	 * (i is left untouched at num_vfs when vflre reads as zero)
	 */
	} while (i != iov_data->num_vfs);

read_unlock:
	rcu_read_unlock();

	return 0;
}
| 92 | |
/* Service the PF/VF mailboxes: reset timed-out VFs, drain the switch
 * manager (SM) mailbox, and process pending messages from each VF.
 * Processing round-robins across VFs so none is starved. Always
 * returns 0.
 */
s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	/* Most VF messages sent to the PF cause the PF to respond by
	 * requesting from the SM mailbox. This means that too many VF
	 * messages processed at once could cause a mailbox timeout on the PF.
	 * To prevent this, store a pointer to the next VF mbx to process. Use
	 * that as the start of the loop so that we don't starve whichever VF
	 * got ignored on the previous run.
	 */
process_mbx:
	/* GNU "?:": start from next_vf_mbx when nonzero, else from the top;
	 * the loop counts i down toward zero
	 */
	for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		struct fm10k_mbx_info *mbx = &vf_info->mbx;
		u16 glort = vf_info->glort;

		/* process the SM mailbox first to drain outgoing messages */
		hw->mbx.ops.process(hw, &hw->mbx);

		/* verify port mapping is valid, if not reset port */
		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort))
			hw->iov.ops.reset_lport(hw, vf_info);

		/* reset VFs that have mailbox timed out */
		if (!mbx->timeout) {
			hw->iov.ops.reset_resources(hw, vf_info);
			mbx->ops.connect(hw, mbx);
		}

		/* guarantee we have free space in the SM mailbox */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
			/* keep track of how many times this occurs */
			interface->hw_sm_mbx_full++;

			/* make sure we try again momentarily */
			fm10k_service_event_schedule(interface);

			/* leave i >= 0 so next_vf_mbx records where to
			 * resume on the next service-task invocation
			 */
			break;
		}

		/* cleanup mailbox and process received messages */
		mbx->ops.process(hw, mbx);
	}

	/* if we stopped processing mailboxes early, update next_vf_mbx.
	 * Otherwise, reset next_vf_mbx, and restart loop so that we process
	 * the remaining mailboxes we skipped at the start.
	 */
	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}

	/* free the lock */
	fm10k_mbx_unlock(interface);

read_unlock:
	rcu_read_unlock();

	return 0;
}
| 174 | |
| 175 | void fm10k_iov_suspend(struct pci_dev *pdev) |
| 176 | { |
| 177 | struct fm10k_intfc *interface = pci_get_drvdata(pdev); |
| 178 | struct fm10k_iov_data *iov_data = interface->iov_data; |
| 179 | struct fm10k_hw *hw = &interface->hw; |
| 180 | int num_vfs, i; |
| 181 | |
| 182 | /* pull out num_vfs from iov_data */ |
| 183 | num_vfs = iov_data ? iov_data->num_vfs : 0; |
| 184 | |
| 185 | /* shut down queue mapping for VFs */ |
| 186 | fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss), |
| 187 | FM10K_DGLORTMAP_NONE); |
| 188 | |
| 189 | /* Stop any active VFs and reset their resources */ |
| 190 | for (i = 0; i < num_vfs; i++) { |
| 191 | struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; |
| 192 | |
| 193 | hw->iov.ops.reset_resources(hw, vf_info); |
| 194 | hw->iov.ops.reset_lport(hw, vf_info); |
| 195 | } |
| 196 | } |
| 197 | |
/* Restore SR-IOV hardware state: hand queue/interrupt resources back to
 * the VFs, rebuild the VF RSS DGLORT mapping, and reprogram each VF's
 * logical port, default MAC/VLAN, and mailbox. Returns 0 on success or
 * -ENOMEM when iov_data was never allocated.
 */
int fm10k_iov_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* return error if iov_data is not already populated */
	if (!iov_data)
		return -ENOMEM;

	/* allocate hardware resources for the VFs */
	hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);

	/* configure DGLORT mapping for RSS */
	dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	dglort.idx = fm10k_dglort_vf_rss;
	dglort.inner_rss = 1;
	/* rss_l/vsi_l are bit widths (fls of size - 1) covering the
	 * queues-per-pool and total-VF ranges respectively
	 */
	dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
	dglort.queue_b = fm10k_vf_queue_index(hw, 0);
	dglort.vsi_l = fls(hw->iov.total_vfs - 1);
	dglort.vsi_b = 1;

	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign resources to the device */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* allocate all but the last GLORT to the VFs */
		if (i == ((~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT))
			break;

		/* assign GLORT to VF, and restrict it to multicast */
		hw->iov.ops.set_lport(hw, vf_info, i,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* mailbox is disconnected so we don't send a message */
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

		/* now we are ready so we can connect */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

	return 0;
}
| 248 | |
| 249 | s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid) |
| 250 | { |
| 251 | struct fm10k_iov_data *iov_data = interface->iov_data; |
| 252 | struct fm10k_hw *hw = &interface->hw; |
| 253 | struct fm10k_vf_info *vf_info; |
| 254 | u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE; |
| 255 | |
| 256 | /* no IOV support, not our message to process */ |
| 257 | if (!iov_data) |
| 258 | return FM10K_ERR_PARAM; |
| 259 | |
| 260 | /* glort outside our range, not our message to process */ |
| 261 | if (vf_idx >= iov_data->num_vfs) |
| 262 | return FM10K_ERR_PARAM; |
| 263 | |
Matthew Vick | eca3204 | 2015-01-31 02:23:05 +0000 | [diff] [blame] | 264 | /* determine if an update has occurred and if so notify the VF */ |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 265 | vf_info = &iov_data->vf_info[vf_idx]; |
| 266 | if (vf_info->sw_vid != pvid) { |
| 267 | vf_info->sw_vid = pvid; |
| 268 | hw->iov.ops.assign_default_mac_vlan(hw, vf_info); |
| 269 | } |
| 270 | |
| 271 | return 0; |
| 272 | } |
| 273 | |
| 274 | static void fm10k_iov_free_data(struct pci_dev *pdev) |
| 275 | { |
| 276 | struct fm10k_intfc *interface = pci_get_drvdata(pdev); |
| 277 | |
| 278 | if (!interface->iov_data) |
| 279 | return; |
| 280 | |
| 281 | /* reclaim hardware resources */ |
| 282 | fm10k_iov_suspend(pdev); |
| 283 | |
| 284 | /* drop iov_data from interface */ |
| 285 | kfree_rcu(interface->iov_data, rcu); |
| 286 | interface->iov_data = NULL; |
| 287 | } |
| 288 | |
| 289 | static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs) |
| 290 | { |
| 291 | struct fm10k_intfc *interface = pci_get_drvdata(pdev); |
| 292 | struct fm10k_iov_data *iov_data = interface->iov_data; |
| 293 | struct fm10k_hw *hw = &interface->hw; |
| 294 | size_t size; |
| 295 | int i, err; |
| 296 | |
| 297 | /* return error if iov_data is already populated */ |
| 298 | if (iov_data) |
| 299 | return -EBUSY; |
| 300 | |
| 301 | /* The PF should always be able to assign resources */ |
| 302 | if (!hw->iov.ops.assign_resources) |
| 303 | return -ENODEV; |
| 304 | |
| 305 | /* nothing to do if no VFs are requested */ |
| 306 | if (!num_vfs) |
| 307 | return 0; |
| 308 | |
| 309 | /* allocate memory for VF storage */ |
| 310 | size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]); |
| 311 | iov_data = kzalloc(size, GFP_KERNEL); |
| 312 | if (!iov_data) |
| 313 | return -ENOMEM; |
| 314 | |
| 315 | /* record number of VFs */ |
| 316 | iov_data->num_vfs = num_vfs; |
| 317 | |
| 318 | /* loop through vf_info structures initializing each entry */ |
| 319 | for (i = 0; i < num_vfs; i++) { |
| 320 | struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; |
| 321 | |
| 322 | /* Record VF VSI value */ |
| 323 | vf_info->vsi = i + 1; |
| 324 | vf_info->vf_idx = i; |
| 325 | |
| 326 | /* initialize mailbox memory */ |
| 327 | err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i); |
| 328 | if (err) { |
| 329 | dev_err(&pdev->dev, |
| 330 | "Unable to initialize SR-IOV mailbox\n"); |
| 331 | kfree(iov_data); |
| 332 | return err; |
| 333 | } |
| 334 | } |
| 335 | |
| 336 | /* assign iov_data to interface */ |
| 337 | interface->iov_data = iov_data; |
| 338 | |
| 339 | /* allocate hardware resources for the VFs */ |
| 340 | fm10k_iov_resume(pdev); |
| 341 | |
| 342 | return 0; |
| 343 | } |
| 344 | |
| 345 | void fm10k_iov_disable(struct pci_dev *pdev) |
| 346 | { |
| 347 | if (pci_num_vf(pdev) && pci_vfs_assigned(pdev)) |
| 348 | dev_err(&pdev->dev, |
| 349 | "Cannot disable SR-IOV while VFs are assigned\n"); |
| 350 | else |
| 351 | pci_disable_sriov(pdev); |
| 352 | |
| 353 | fm10k_iov_free_data(pdev); |
| 354 | } |
| 355 | |
| 356 | static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev) |
| 357 | { |
| 358 | u32 err_sev; |
| 359 | int pos; |
| 360 | |
| 361 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); |
| 362 | if (!pos) |
| 363 | return; |
| 364 | |
| 365 | pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev); |
| 366 | err_sev &= ~PCI_ERR_UNC_COMP_ABORT; |
| 367 | pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev); |
| 368 | } |
| 369 | |
| 370 | int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs) |
| 371 | { |
| 372 | int current_vfs = pci_num_vf(pdev); |
| 373 | int err = 0; |
| 374 | |
| 375 | if (current_vfs && pci_vfs_assigned(pdev)) { |
| 376 | dev_err(&pdev->dev, |
| 377 | "Cannot modify SR-IOV while VFs are assigned\n"); |
| 378 | num_vfs = current_vfs; |
| 379 | } else { |
| 380 | pci_disable_sriov(pdev); |
| 381 | fm10k_iov_free_data(pdev); |
| 382 | } |
| 383 | |
| 384 | /* allocate resources for the VFs */ |
| 385 | err = fm10k_iov_alloc_data(pdev, num_vfs); |
| 386 | if (err) |
| 387 | return err; |
| 388 | |
| 389 | /* allocate VFs if not already allocated */ |
| 390 | if (num_vfs && (num_vfs != current_vfs)) { |
| 391 | /* Disable completer abort error reporting as |
| 392 | * the VFs can trigger this any time they read a queue |
| 393 | * that they don't own. |
| 394 | */ |
| 395 | fm10k_disable_aer_comp_abort(pdev); |
| 396 | |
| 397 | err = pci_enable_sriov(pdev, num_vfs); |
| 398 | if (err) { |
| 399 | dev_err(&pdev->dev, |
| 400 | "Enable PCI SR-IOV failed: %d\n", err); |
| 401 | return err; |
| 402 | } |
| 403 | } |
| 404 | |
| 405 | return num_vfs; |
| 406 | } |
| 407 | |
Jacob Keller | a38488f | 2015-06-03 16:31:07 -0700 | [diff] [blame] | 408 | static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface, |
| 409 | struct fm10k_vf_info *vf_info) |
| 410 | { |
| 411 | struct fm10k_hw *hw = &interface->hw; |
| 412 | |
| 413 | /* assigning the MAC address will send a mailbox message */ |
| 414 | fm10k_mbx_lock(interface); |
| 415 | |
| 416 | /* disable LPORT for this VF which clears switch rules */ |
| 417 | hw->iov.ops.reset_lport(hw, vf_info); |
| 418 | |
| 419 | /* assign new MAC+VLAN for this VF */ |
| 420 | hw->iov.ops.assign_default_mac_vlan(hw, vf_info); |
| 421 | |
| 422 | /* re-enable the LPORT for this VF */ |
| 423 | hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx, |
| 424 | FM10K_VF_FLAG_MULTI_CAPABLE); |
| 425 | |
| 426 | fm10k_mbx_unlock(interface); |
| 427 | } |
| 428 | |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 429 | int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac) |
| 430 | { |
| 431 | struct fm10k_intfc *interface = netdev_priv(netdev); |
| 432 | struct fm10k_iov_data *iov_data = interface->iov_data; |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 433 | struct fm10k_vf_info *vf_info; |
| 434 | |
| 435 | /* verify SR-IOV is active and that vf idx is valid */ |
| 436 | if (!iov_data || vf_idx >= iov_data->num_vfs) |
| 437 | return -EINVAL; |
| 438 | |
| 439 | /* verify MAC addr is valid */ |
| 440 | if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac)) |
| 441 | return -EINVAL; |
| 442 | |
| 443 | /* record new MAC address */ |
| 444 | vf_info = &iov_data->vf_info[vf_idx]; |
| 445 | ether_addr_copy(vf_info->mac, mac); |
| 446 | |
Jacob Keller | a38488f | 2015-06-03 16:31:07 -0700 | [diff] [blame] | 447 | fm10k_reset_vf_info(interface, vf_info); |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 448 | |
| 449 | return 0; |
| 450 | } |
| 451 | |
| 452 | int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid, |
Moshe Shemesh | 79aab09 | 2016-09-22 12:11:15 +0300 | [diff] [blame] | 453 | u8 qos, __be16 vlan_proto) |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 454 | { |
| 455 | struct fm10k_intfc *interface = netdev_priv(netdev); |
| 456 | struct fm10k_iov_data *iov_data = interface->iov_data; |
| 457 | struct fm10k_hw *hw = &interface->hw; |
| 458 | struct fm10k_vf_info *vf_info; |
| 459 | |
| 460 | /* verify SR-IOV is active and that vf idx is valid */ |
| 461 | if (!iov_data || vf_idx >= iov_data->num_vfs) |
| 462 | return -EINVAL; |
| 463 | |
| 464 | /* QOS is unsupported and VLAN IDs accepted range 0-4094 */ |
| 465 | if (qos || (vid > (VLAN_VID_MASK - 1))) |
| 466 | return -EINVAL; |
| 467 | |
Moshe Shemesh | 79aab09 | 2016-09-22 12:11:15 +0300 | [diff] [blame] | 468 | /* VF VLAN Protocol part to default is unsupported */ |
| 469 | if (vlan_proto != htons(ETH_P_8021Q)) |
| 470 | return -EPROTONOSUPPORT; |
| 471 | |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 472 | vf_info = &iov_data->vf_info[vf_idx]; |
| 473 | |
| 474 | /* exit if there is nothing to do */ |
| 475 | if (vf_info->pf_vid == vid) |
| 476 | return 0; |
| 477 | |
| 478 | /* record default VLAN ID for VF */ |
| 479 | vf_info->pf_vid = vid; |
| 480 | |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 481 | /* Clear the VLAN table for the VF */ |
| 482 | hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false); |
| 483 | |
Jacob Keller | a38488f | 2015-06-03 16:31:07 -0700 | [diff] [blame] | 484 | fm10k_reset_vf_info(interface, vf_info); |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 485 | |
| 486 | return 0; |
| 487 | } |
| 488 | |
Jeff Kirsher | de44519 | 2015-04-03 13:26:56 -0700 | [diff] [blame] | 489 | int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, |
| 490 | int __always_unused unused, int rate) |
Alexander Duyck | 883a9cc | 2014-09-20 19:52:09 -0400 | [diff] [blame] | 491 | { |
| 492 | struct fm10k_intfc *interface = netdev_priv(netdev); |
| 493 | struct fm10k_iov_data *iov_data = interface->iov_data; |
| 494 | struct fm10k_hw *hw = &interface->hw; |
| 495 | |
| 496 | /* verify SR-IOV is active and that vf idx is valid */ |
| 497 | if (!iov_data || vf_idx >= iov_data->num_vfs) |
| 498 | return -EINVAL; |
| 499 | |
| 500 | /* rate limit cannot be less than 10Mbs or greater than link speed */ |
| 501 | if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX)) |
| 502 | return -EINVAL; |
| 503 | |
| 504 | /* store values */ |
| 505 | iov_data->vf_info[vf_idx].rate = rate; |
| 506 | |
| 507 | /* update hardware configuration */ |
| 508 | hw->iov.ops.configure_tc(hw, vf_idx, rate); |
| 509 | |
| 510 | return 0; |
| 511 | } |
| 512 | |
| 513 | int fm10k_ndo_get_vf_config(struct net_device *netdev, |
| 514 | int vf_idx, struct ifla_vf_info *ivi) |
| 515 | { |
| 516 | struct fm10k_intfc *interface = netdev_priv(netdev); |
| 517 | struct fm10k_iov_data *iov_data = interface->iov_data; |
| 518 | struct fm10k_vf_info *vf_info; |
| 519 | |
| 520 | /* verify SR-IOV is active and that vf idx is valid */ |
| 521 | if (!iov_data || vf_idx >= iov_data->num_vfs) |
| 522 | return -EINVAL; |
| 523 | |
| 524 | vf_info = &iov_data->vf_info[vf_idx]; |
| 525 | |
| 526 | ivi->vf = vf_idx; |
| 527 | ivi->max_tx_rate = vf_info->rate; |
| 528 | ivi->min_tx_rate = 0; |
| 529 | ether_addr_copy(ivi->mac, vf_info->mac); |
| 530 | ivi->vlan = vf_info->pf_vid; |
| 531 | ivi->qos = 0; |
| 532 | |
| 533 | return 0; |
| 534 | } |