/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"

/* General service functions */
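/* Record a VF's parent PF in each of the four storm processors'
 * internal memories, so firmware associates the VF's events with
 * its PF.
 */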
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

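/* Mark a function as enabled or disabled in each storm processor's
 * internal memory.
 */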
static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

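/* Map an absolute VF id to an index in the PF's VF array. Returns
 * BNX2X_NR_VIRTFN(bp) when no VF matches, which callers must treat
 * as "not found" (see bnx2x_vf_by_abs_fid below).
 */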
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;
		++vf_sb_count(vf);
	}
}

static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid;

	/* IGU in normal mode - read CAM */
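	/* Each valid CAM entry maps one status block to a function:
	 * the fid field carries either a PF number (IS_PF bit set) or
	 * an absolute VF number, plus the MSIX vector assigned to it.
	 */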
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (!(fid & IGU_FID_ENCODE_IS_PF))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));

		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}

static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
{
	int i;
	u8 queue_count = 0;

	if (IS_SRIOV(bp))
		for_each_vf(bp, i)
			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);

	return queue_count;
}

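/* A sketch of the expected call site (illustrative, not taken from
 * this file): called from the PF probe path once the BARs are mapped,
 * e.g.
 *
 *	rc = bnx2x_iov_init_one(bp, int_mode, requested_vfs);
 *
 * where requested_vfs may simply be the HW maximum. A zero VF count,
 * a VF device, an E1x chip, a forced INTx/MSI mode, missing ARI or a
 * backward-compatible IGU all make the function return early with
 * SRIOV left disabled.
 */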
/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i, qcount;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) {
		DP(BNX2X_MSG_IOV, "no sriov - capability not found\n");
		return 0;
	}

	/* verify this is a PF */
	if (IS_VF(bp))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported, SRIOV cannot be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV cannot be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information; however, the sriov structure is for the private use
	 * of the pci module. Also, we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	/* calculate the actual number of VFs */
	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* get the total queue count and allocate the global queue arrays */
	qcount = bnx2x_iov_get_max_queue_count(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
				 GFP_KERNEL);
	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}

/* VF enable primitives
 * When pretend is required, the caller is responsible for calling
 * pretend prior to calling these routines.
 */

/* called only on E1H or E2.
 * When pretending to be a PF, the pretend value is the function number
 * 0...7.
 * When pretending to be a VF, the pretend value is the
 * PF-num:VF-valid:ABS-VFID combination.
 */
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
	u32 pretend_reg;

	if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX)
		return -1;

	/* get my own pretend register */
	pretend_reg = bnx2x_get_pretend_reg(bp);
	REG_WR(bp, pretend_reg, pretend_func_val);
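	/* read back the pretend register; this flushes the posted write
	 * so that subsequent accesses are issued under the pretended
	 * identity
	 */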
	REG_RD(bp, pretend_reg);
	return 0;
}

/* internal vf enable - until the vf is enabled internally all
 * transactions are blocked. This routine must always be the last
 * step, called while pretending.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears the vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

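	/* Worked example (hypothetical values): on path 1 with abs_vfid
	 * 40, was_err_group = (2 + 40) >> 5 = 1, selecting
	 * PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR; the write below then sets
	 * bit 40 & 0x1f = 8 to clear that VF's error indication.
	 */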
	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		goto unknown_dev;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);

unknown_dev:
	BNX2X_ERR("Unknown device\n");
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Wait 100ms */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
{
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
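	/* Worked example (hypothetical pool size): a 128-entry vlan pool
	 * shared by 8 VFs gives vlan_count = 1 << ilog2(128) = 128, so
	 * each VF is provisioned 128 / 8 = 16 vlan filter rules.
	 */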

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
}

/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflects the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set the VFs' starting CID. If it is > 0, the preceding CIDs
	 * belong to the PF L2 queues.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size, although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the number of VF allowed doorbells to the full DQ range */
	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
	if (!IS_SRIOV(bp))
		return;

	REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

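/* The next two helpers apply the SR-IOV routing ID arithmetic:
 * VF RID = PF RID + offset + stride * vfid, with the bits above 8
 * selecting the bus. Worked example (hypothetical config): a PF at
 * devfn 0 with offset 128 and stride 2 places VF 3 at RID
 * 0 + 128 + 2 * 3 = 134, i.e. the same bus (134 >> 8 == 0) and
 * devfn 134.
 */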
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

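	/* SR-IOV BARs are 64-bit, hence the step of 2; each VF owns an
	 * equally sized slice of the PF's VF BAR, located at
	 * start + size * abs_vfid
	 */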
	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		do_div(size, iov->total);
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);
}

int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		   BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

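	/* carve the total context size into ILT-page-sized chunks; the
	 * last used page may be partial and any remaining slots are
	 * left empty
	 */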
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
			tot_size);
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
			tot_size);
	BP_VF_MBX_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}

/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid, qcount, i;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

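		/* Worked example (hypothetical sizes): with
		 * BNX2X_CIDS_PER_VF = 4 and ILT_PAGE_CIDS = 1024,
		 * first_vf_in_pf = 64 and vfid = 2 give base_vf_cid =
		 * (64 + 2) * 4 = 264, i.e. entry 264 of context page 0.
		 */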
		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, &vf->alloc_resc);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	qcount = 0;
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, i);
		vf->devfn = bnx2x_vf_devfn(bp, i);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
	}

	return 0;
}

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}