/* bnx2x_vfpf.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *             Ariel Elior <ariele@broadcom.com>
 */

#include "bnx2x.h"
#include "bnx2x_sriov.h"
#include <linux/crc32.h>

/* place a given tlv on the tlv buffer at a given offset */
void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
                   u16 length)
{
        struct channel_tlv *tl =
                (struct channel_tlv *)(tlvs_list + offset);

        tl->type = type;
        tl->length = length;
}

/* Clear the mailbox and init the header of the first tlv */
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
                     u16 type, u16 length)
{
        DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
           type);

        /* Clear mailbox */
        memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));

        /* init type and length */
        bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);

        /* init first tlv header */
        first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}

/* list the types and lengths of the tlvs on the buffer */
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
        int i = 1;
        struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;

        while (tlv->type != CHANNEL_TLV_LIST_END) {
                /* output tlv */
                DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
                   tlv->type, tlv->length);

                /* advance to next tlv */
                tlvs_list += tlv->length;

                /* cast general tlv list pointer to channel tlv header */
                tlv = (struct channel_tlv *)tlvs_list;

                i++;

                /* break condition for this loop */
                if (i > MAX_TLVS_IN_LIST) {
                        WARN(true, "corrupt tlvs");
                        return;
                }
        }

        /* output last tlv */
        DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
           tlv->type, tlv->length);
}

/* test whether we support a tlv type */
bool bnx2x_tlv_supported(u16 tlvtype)
{
        return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

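/* Map a kernel return code to the status code reported back to the VF over
 * the PF-VF channel.
 */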
static inline int bnx2x_pfvf_status_codes(int rc)
{
        switch (rc) {
        case 0:
                return PFVF_STATUS_SUCCESS;
        case -ENOMEM:
                return PFVF_STATUS_NO_RESOURCE;
        default:
                return PFVF_STATUS_FAILURE;
        }
}

/* General service functions */
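/* Acknowledge the VF's mailbox to the FW: mark the per-VF channel state in
 * CSTORM internal memory as ready for the next request.
 */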
static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
{
        u32 addr = BAR_CSTRORM_INTMEM +
                   CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);

        REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
}

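/* Mark the VF's mailbox as valid in CSTORM internal memory, which enables the
 * VF-PF channel for this VF on the FW side.
 */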
static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
{
        u32 addr = BAR_CSTRORM_INTMEM +
                   CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);

        REG_WR8(bp, addr, 1);
}

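/* Mark the mailboxes of all VFs under this PF as valid */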
static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
{
        int i;

        for_each_vf(bp, i)
                storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
}

/* enable vf_pf mailbox (aka vf-pf channel) */
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
{
        bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);

        /* enable the mailbox in the FW */
        storm_memset_vf_mbx_ack(bp, abs_vfid);
        storm_memset_vf_mbx_valid(bp, abs_vfid);

        /* enable the VF access to the mailbox */
        bnx2x_vf_enable_access(bp, abs_vfid);
}

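/* Copy len32 dwords between a PF buffer and a VF buffer over DMAE (PCI to
 * PCI). The from_vf flag selects the direction; the vfid is encoded into the
 * DMAE opcode so the VF-side address is accessed on behalf of that VF.
 */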
/* this works only on !E1h */
static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
                                dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
                                u32 vf_addr_lo, u32 len32)
{
        struct dmae_command dmae;

        if (CHIP_IS_E1x(bp)) {
                BNX2X_ERR("Chip revision does not support VFs\n");
                return DMAE_NOT_RDY;
        }

        if (!bp->dmae_ready) {
                BNX2X_ERR("DMAE is not ready, can not copy\n");
                return DMAE_NOT_RDY;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);

        if (from_vf) {
                dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
                        (DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
                        (DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);

                dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);

                dmae.src_addr_lo = vf_addr_lo;
                dmae.src_addr_hi = vf_addr_hi;
                dmae.dst_addr_lo = U64_LO(pf_addr);
                dmae.dst_addr_hi = U64_HI(pf_addr);
        } else {
                dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
                        (DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
                        (DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);

                dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);

                dmae.src_addr_lo = U64_LO(pf_addr);
                dmae.src_addr_hi = U64_HI(pf_addr);
                dmae.dst_addr_lo = vf_addr_lo;
                dmae.dst_addr_hi = vf_addr_hi;
        }
        dmae.len = len32;
        bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_DMAE);

        /* issue the command and wait for completion */
        return bnx2x_issue_dmae_with_comp(bp, &dmae);
}

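/* Build the response TLV chain in the PF-side mailbox and DMAE it back to the
 * VF's buffer. The body (if any) is copied first and the header last, since
 * the VF polls on the header's status field to detect completion.
 */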
static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
        u64 vf_addr;
        dma_addr_t pf_addr;
        u16 length, type;
        int rc;
        struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;

        /* prepare response */
        type = mbx->first_tlv.tl.type;
        length = type == CHANNEL_TLV_ACQUIRE ?
                sizeof(struct pfvf_acquire_resp_tlv) :
                sizeof(struct pfvf_general_resp_tlv);
        bnx2x_add_tlv(bp, resp, 0, type, length);
        resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
        bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));
        bnx2x_dp_tlv_list(bp, resp);
        DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
           mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

        /* send response */
        vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
                  mbx->first_tlv.resp_msg_offset;
        pf_addr = mbx->msg_mapping +
                  offsetof(struct bnx2x_vf_mbx_msg, resp);

        /* copy the response body, if there is one, before the header, as the vf
         * is sensitive to the header being written
         */
        if (resp->hdr.tl.length > sizeof(u64)) {
                length = resp->hdr.tl.length - sizeof(u64);
                vf_addr += sizeof(u64);
                pf_addr += sizeof(u64);
                rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
                                          U64_HI(vf_addr),
                                          U64_LO(vf_addr),
                                          length/4);
                if (rc) {
                        BNX2X_ERR("Failed to copy response body to VF %d\n",
                                  vf->abs_vfid);
                        goto mbx_error;
                }
                vf_addr -= sizeof(u64);
                pf_addr -= sizeof(u64);
        }

        /* ack the FW */
        storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
        mmiowb();

        /* initiate dmae to send the response */
        mbx->flags &= ~VF_MSG_INPROCESS;

        /* copy the response header including status-done field,
         * must be last dmae, must be after FW is acked
         */
        rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
                                  U64_HI(vf_addr),
                                  U64_LO(vf_addr),
                                  sizeof(u64)/4);

        /* unlock channel mutex */
        bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

        if (rc) {
                BNX2X_ERR("Failed to copy response status to VF %d\n",
                          vf->abs_vfid);
                goto mbx_error;
        }
        return;

mbx_error:
        bnx2x_vf_release(bp, vf, false); /* non blocking */
}

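/* Fill and send the ACQUIRE response: PF/device capabilities plus the
 * resources (queues, SBs, filters) that were granted to the VF. On
 * NO_RESOURCE the maximum possible numbers are reported instead.
 */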
static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                      struct bnx2x_vf_mbx *mbx, int vfop_status)
{
        int i;
        struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
        struct pf_vf_resc *resc = &resp->resc;
        u8 status = bnx2x_pfvf_status_codes(vfop_status);

        memset(resp, 0, sizeof(*resp));

        /* fill in pfdev info */
        resp->pfdev_info.chip_num = bp->common.chip_id;
        resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
        resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
        resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
                                   /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
        bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
                          sizeof(resp->pfdev_info.fw_ver));

        if (status == PFVF_STATUS_NO_RESOURCE ||
            status == PFVF_STATUS_SUCCESS) {
                /* set resource numbers; if status equals NO_RESOURCE these
                 * are the max possible numbers
                 */
                resc->num_rxqs = vf_rxq_count(vf) ? :
                        bnx2x_vf_max_queue_cnt(bp, vf);
                resc->num_txqs = vf_txq_count(vf) ? :
                        bnx2x_vf_max_queue_cnt(bp, vf);
                resc->num_sbs = vf_sb_count(vf);
                resc->num_mac_filters = vf_mac_rules_cnt(vf);
                resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
                resc->num_mc_filters = 0;

                if (status == PFVF_STATUS_SUCCESS) {
                        /* fill in the allocated resources */
                        struct pf_vf_bulletin_content *bulletin =
                                BP_VF_BULLETIN(bp, vf->index);

                        for_each_vfq(vf, i)
                                resc->hw_qid[i] =
                                        vfq_qzone_id(vf, vfq_get(vf, i));

                        for_each_vf_sb(vf, i) {
                                resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
                                resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
                        }

                        /* if a mac has been set for this vf, supply it */
                        if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
                                memcpy(resc->current_mac_addr, bulletin->mac,
                                       ETH_ALEN);
                        }
                }
        }

        DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
           "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
           vf->abs_vfid,
           resp->pfdev_info.chip_num,
           resp->pfdev_info.db_size,
           resp->pfdev_info.indices_per_sb,
           resp->pfdev_info.pf_cap,
           resc->num_rxqs,
           resc->num_txqs,
           resc->num_sbs,
           resc->num_mac_filters,
           resc->num_vlan_filters,
           resc->num_mc_filters,
           resp->pfdev_info.fw_ver);

        DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
        for (i = 0; i < vf_rxq_count(vf); i++)
                DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
        DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
        for (i = 0; i < vf_sb_count(vf); i++)
                DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
                        resc->hw_sbs[i].hw_sb_id,
                        resc->hw_sbs[i].sb_qid);
        DP_CONT(BNX2X_MSG_IOV, "]\n");

        /* send the response */
        vf->op_rc = vfop_status;
        bnx2x_vf_mbx_resp(bp, vf);
}

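/* Handle an ACQUIRE request: log the VF's resource request, try to allocate
 * the resources, record the address of the VF's bulletin board and send back
 * an acquire response.
 */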
static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                 struct bnx2x_vf_mbx *mbx)
{
        int rc;
        struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;

        /* log vfdef info */
        DP(BNX2X_MSG_IOV,
           "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
           vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
           acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
           acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
           acquire->resc_request.num_vlan_filters,
           acquire->resc_request.num_mc_filters);

        /* acquire the resources */
        rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);

        /* store address of vf's bulletin board */
        vf->bulletin_map = acquire->bulletin_addr;

        /* response */
        bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
}

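/* Handle an INIT request: record the VF's status block, slow path queue and
 * statistics buffer addresses and perform the FW-side VF initialization.
 */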
static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                 struct bnx2x_vf_mbx *mbx)
{
        struct vfpf_init_tlv *init = &mbx->msg->req.init;

        /* record ghost addresses from vf message */
        vf->spq_map = init->spq_addr;
        vf->fw_stat_map = init->stats_addr;
        vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);

        /* response */
        bnx2x_vf_mbx_resp(bp, vf);
}

/* convert MBX queue-flags to standard SP queue-flags */
static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
                                     unsigned long *sp_q_flags)
{
        if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
                __set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
        if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
                __set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
        if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
                __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
        if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
                __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
        if (mbx_q_flags & VFPF_QUEUE_FLG_OV)
                __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
        if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
                __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
        if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
                __set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
        if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
                __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
        if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
                __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
}

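/* Handle a SETUP_Q request: translate the VF's queue construction parameters
 * (tx/rx descriptor rings, SB index, flags) into standard queue init/setup
 * params and kick off an asynchronous queue-setup operation; the response is
 * sent from the operation's completion callback.
 */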
static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                 struct bnx2x_vf_mbx *mbx)
{
        struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
        struct bnx2x_vfop_cmd cmd = {
                .done = bnx2x_vf_mbx_resp,
                .block = false,
        };

        /* verify vf_qid */
        if (setup_q->vf_qid >= vf_rxq_count(vf)) {
                BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
                          setup_q->vf_qid, vf_rxq_count(vf));
                vf->op_rc = -EINVAL;
                goto response;
        }

        /* tx queues must be setup alongside rx queues thus if the rx queue
         * is not marked as valid there's nothing to do.
         */
        if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
                struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
                unsigned long q_type = 0;

                struct bnx2x_queue_init_params *init_p;
                struct bnx2x_queue_setup_params *setup_p;

                /* reinit the VF operation context */
                memset(&vf->op_params.qctor, 0, sizeof(vf->op_params.qctor));
                setup_p = &vf->op_params.qctor.prep_qsetup;
                init_p = &vf->op_params.qctor.qstate.params.init;

                /* activate immediately */
                __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);

                if (setup_q->param_valid & VFPF_TXQ_VALID) {
                        struct bnx2x_txq_setup_params *txq_params =
                                &setup_p->txq_params;

                        __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

                        /* save sb resource index */
                        q->sb_idx = setup_q->txq.vf_sb;

                        /* tx init */
                        init_p->tx.hc_rate = setup_q->txq.hc_rate;
                        init_p->tx.sb_cq_index = setup_q->txq.sb_index;

                        bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
                                                 &init_p->tx.flags);

                        /* tx setup - flags */
                        bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
                                                 &setup_p->flags);

                        /* tx setup - general, nothing */

                        /* tx setup - tx */
                        txq_params->dscr_map = setup_q->txq.txq_addr;
                        txq_params->sb_cq_index = setup_q->txq.sb_index;
                        txq_params->traffic_type = setup_q->txq.traffic_type;

                        bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
                                                 q->index, q->sb_idx);
                }

                if (setup_q->param_valid & VFPF_RXQ_VALID) {
                        struct bnx2x_rxq_setup_params *rxq_params =
                                &setup_p->rxq_params;

                        __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

                        /* Note: there is no support for different SBs
                         * for TX and RX
                         */
                        q->sb_idx = setup_q->rxq.vf_sb;

                        /* rx init */
                        init_p->rx.hc_rate = setup_q->rxq.hc_rate;
                        init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
                        bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
                                                 &init_p->rx.flags);

                        /* rx setup - flags */
                        bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
                                                 &setup_p->flags);

                        /* rx setup - general */
                        setup_p->gen_params.mtu = setup_q->rxq.mtu;

                        /* rx setup - rx */
                        rxq_params->drop_flags = setup_q->rxq.drop_flags;
                        rxq_params->dscr_map = setup_q->rxq.rxq_addr;
                        rxq_params->sge_map = setup_q->rxq.sge_addr;
                        rxq_params->rcq_map = setup_q->rxq.rcq_addr;
                        rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
                        rxq_params->buf_sz = setup_q->rxq.buf_sz;
                        rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
                        rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
                        rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
                        rxq_params->cache_line_log =
                                setup_q->rxq.cache_line_log;
                        rxq_params->sb_cq_index = setup_q->rxq.sb_index;

                        bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
                                                 q->index, q->sb_idx);
                }
                /* complete the preparations */
                bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);

                vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
                if (vf->op_rc)
                        goto response;
                return;
        }
response:
        bnx2x_vf_mbx_resp(bp, vf);
}

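/* Stages of the asynchronous queue-filters operation: mac filters, then vlan
 * filters, then rx-mode, then multicast, then done.
 */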
enum bnx2x_vfop_filters_state {
        BNX2X_VFOP_MBX_Q_FILTERS_MACS,
        BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
        BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
        BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
        BNX2X_VFOP_MBX_Q_FILTERS_DONE
};

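/* Build a driver-side filter list of the requested type (mac or vlan) from
 * the filters carried in the SET_Q_FILTERS TLV. The list is allocated here
 * and freed if it turns out to be empty; otherwise it is returned via *pfl.
 */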
static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
                                     struct bnx2x_virtf *vf,
                                     struct vfpf_set_q_filters_tlv *tlv,
                                     struct bnx2x_vfop_filters **pfl,
                                     u32 type_flag)
{
        int i, j;
        struct bnx2x_vfop_filters *fl = NULL;
        size_t fsz;

        fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
              sizeof(struct bnx2x_vfop_filters);

        fl = kzalloc(fsz, GFP_KERNEL);
        if (!fl)
                return -ENOMEM;

        INIT_LIST_HEAD(&fl->head);

        for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
                struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];

                if ((msg_filter->flags & type_flag) != type_flag)
                        continue;
                if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
                        fl->filters[j].mac = msg_filter->mac;
                        fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
                } else {
                        fl->filters[j].vid = msg_filter->vlan_tag;
                        fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
                }
                fl->filters[j].add =
                        (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
                        true : false;
                list_add_tail(&fl->filters[j++].link, &fl->head);
        }
        if (list_empty(&fl->head))
                kfree(fl);
        else
                *pfl = fl;

        return 0;
}

static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
                                     struct vfpf_q_mac_vlan_filter *filter)
{
        DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
        if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
                DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
        if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
                DP_CONT(msglvl, ", MAC=%pM", filter->mac);
        DP_CONT(msglvl, "\n");
}

static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
                                      struct vfpf_set_q_filters_tlv *filters)
{
        int i;

        if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
                for (i = 0; i < filters->n_mac_vlan_filters; i++)
                        bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
                                                 &filters->filters[i]);

        if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
                DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);

        if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
                for (i = 0; i < filters->n_multicast; i++)
                        DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
}

#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID

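/* Walk the queue-filters state machine: configure mac filters, vlan filters,
 * rx-mode and multicast in turn. Each stage issues an asynchronous command
 * whose completion callback re-enters this function in the next state; once
 * all stages complete (or on error) the operation is ended, which eventually
 * triggers the enclosing command's done callback.
 */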
static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        int rc;

        struct vfpf_set_q_filters_tlv *msg =
                &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;

        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
        enum bnx2x_vfop_filters_state state = vfop->state;

        struct bnx2x_vfop_cmd cmd = {
                .done = bnx2x_vfop_mbx_qfilters,
                .block = false,
        };

        DP(BNX2X_MSG_IOV, "STATE: %d\n", state);

        if (vfop->rc < 0)
                goto op_err;

        switch (state) {
        case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
                /* next state */
                vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;

                /* check for any vlan/mac changes */
                if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
                        /* build mac list */
                        struct bnx2x_vfop_filters *fl = NULL;

                        vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
                                                             VFPF_MAC_FILTER);
                        if (vfop->rc)
                                goto op_err;

                        if (fl) {
                                /* set mac list */
                                rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
                                                             msg->vf_qid,
                                                             false);
                                if (rc) {
                                        vfop->rc = rc;
                                        goto op_err;
                                }
                                return;
                        }
                }
                /* fall through */

        case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
                /* next state */
                vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;

                /* check for any vlan/mac changes */
                if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
                        /* build vlan list */
                        struct bnx2x_vfop_filters *fl = NULL;

                        vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
                                                             VFPF_VLAN_FILTER);
                        if (vfop->rc)
                                goto op_err;

                        if (fl) {
                                /* set vlan list */
                                rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
                                                              msg->vf_qid,
                                                              false);
                                if (rc) {
                                        vfop->rc = rc;
                                        goto op_err;
                                }
                                return;
                        }
                }
                /* fall through */

        case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
                /* next state */
                vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;

                if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
                        unsigned long accept = 0;

                        /* convert VF-PF rx mask to bnx2x accept flags */
                        if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
                                __set_bit(BNX2X_ACCEPT_UNICAST, &accept);

                        if (msg->rx_mask &
                            VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
                                __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);

                        if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
                                __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);

                        if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
                                __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);

                        if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
                                __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);

                        /* A packet arriving to the vf's mac should be accepted
                         * with any vlan
                         */
                        __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);

                        /* set rx-mode */
                        rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
                                                   msg->vf_qid, accept);
                        if (rc) {
                                vfop->rc = rc;
                                goto op_err;
                        }
                        return;
                }
                /* fall through */

        case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
                /* next state */
                vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;

                if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
                        /* set mcasts */
                        rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
                                                  msg->n_multicast, false);
                        if (rc) {
                                vfop->rc = rc;
                                goto op_err;
                        }
                        return;
                }
                /* fall through */
op_done:
        case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
                bnx2x_vfop_end(bp, vf, vfop);
                return;
op_err:
        BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
                  vf->abs_vfid, msg->vf_qid, vfop->rc);
        goto op_done;

        default:
                bnx2x_vfop_default(state);
        }
}

static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
                                       struct bnx2x_virtf *vf,
                                       struct bnx2x_vfop_cmd *cmd)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
        if (vfop) {
                bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
                                 bnx2x_vfop_mbx_qfilters, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
                                             cmd->block);
        }
        return -ENOMEM;
}

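/* Handle a SET_Q_FILTERS request. If the PF has already pinned a mac for this
 * VF through the set-vf-mac ndo (reflected in the bulletin board), only that
 * mac may be configured; otherwise the requested mac/vlan, rx-mode and
 * multicast changes are applied through the asynchronous qfilters operation.
 */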
static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
                                       struct bnx2x_virtf *vf,
                                       struct bnx2x_vf_mbx *mbx)
{
        struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
        struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
        struct bnx2x_vfop_cmd cmd = {
                .done = bnx2x_vf_mbx_resp,
                .block = false,
        };

        /* if a mac was already set for this VF via the set vf mac ndo, we only
         * accept mac configurations of that mac. Why accept them at all?
         * Because the PF may have been unable to configure the mac at the time
         * since the queue was not set up.
         */
        if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
                /* once a mac was set by ndo can only accept a single mac... */
                if (filters->n_mac_vlan_filters > 1) {
                        BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
                                  vf->abs_vfid);
                        vf->op_rc = -EPERM;
                        goto response;
                }

                /* ...and only the mac set by the ndo */
                if (filters->n_mac_vlan_filters == 1 &&
                    memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) {
                        BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
                                  vf->abs_vfid);

                        vf->op_rc = -EPERM;
                        goto response;
                }
        }

        /* verify vf_qid */
        if (filters->vf_qid > vf_rxq_count(vf))
                goto response;

        DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
           vf->abs_vfid,
           filters->vf_qid);

        /* print q_filter message */
        bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);

        vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
        if (vf->op_rc)
                goto response;
        return;

response:
        bnx2x_vf_mbx_resp(bp, vf);
}

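/* Handle a TEARDOWN_Q request: bring down the given VF queue asynchronously
 * and respond from the completion callback.
 */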
static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                    struct bnx2x_vf_mbx *mbx)
{
        int qid = mbx->msg->req.q_op.vf_qid;
        struct bnx2x_vfop_cmd cmd = {
                .done = bnx2x_vf_mbx_resp,
                .block = false,
        };

        DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
           vf->abs_vfid, qid);

        vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
        if (vf->op_rc)
                bnx2x_vf_mbx_resp(bp, vf);
}

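/* Handle a CLOSE request: run the VF close flow asynchronously and respond
 * from the completion callback.
 */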
static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                  struct bnx2x_vf_mbx *mbx)
{
        struct bnx2x_vfop_cmd cmd = {
                .done = bnx2x_vf_mbx_resp,
                .block = false,
        };

        DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);

        vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
        if (vf->op_rc)
                bnx2x_vf_mbx_resp(bp, vf);
}

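/* Handle a RELEASE request: run the VF release flow asynchronously and
 * respond from the completion callback.
 */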
static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                    struct bnx2x_vf_mbx *mbx)
{
        struct bnx2x_vfop_cmd cmd = {
                .done = bnx2x_vf_mbx_resp,
                .block = false,
        };

        DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);

        vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
        if (vf->op_rc)
                bnx2x_vf_mbx_resp(bp, vf);
}

/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                 struct bnx2x_vf_mbx *mbx)
{
        int i;

        /* check if tlv type is known */
        if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
                /* Lock the per vf op mutex and note the locker's identity.
                 * The unlock will take place in mbx response.
                 */
                bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

                /* switch on the opcode */
                switch (mbx->first_tlv.tl.type) {
                case CHANNEL_TLV_ACQUIRE:
                        bnx2x_vf_mbx_acquire(bp, vf, mbx);
                        break;
                case CHANNEL_TLV_INIT:
                        bnx2x_vf_mbx_init_vf(bp, vf, mbx);
                        break;
                case CHANNEL_TLV_SETUP_Q:
                        bnx2x_vf_mbx_setup_q(bp, vf, mbx);
                        break;
                case CHANNEL_TLV_SET_Q_FILTERS:
                        bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
                        break;
                case CHANNEL_TLV_TEARDOWN_Q:
                        bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
                        break;
                case CHANNEL_TLV_CLOSE:
                        bnx2x_vf_mbx_close_vf(bp, vf, mbx);
                        break;
                case CHANNEL_TLV_RELEASE:
                        bnx2x_vf_mbx_release_vf(bp, vf, mbx);
                        break;
                }
        } else {
                /* unknown TLV - this may belong to a VF driver from the future
                 * - a version written after this PF driver was written, which
                 * supports features unknown as of yet. Too bad since we don't
                 * support them. Or this may be because someone wrote a crappy
                 * VF driver and is sending garbage over the channel.
                 */
                BNX2X_ERR("unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
                          mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
                for (i = 0; i < 20; i++)
                        DP_CONT(BNX2X_MSG_IOV, "%x ",
                                mbx->msg->req.tlv_buf_size.tlv_buffer[i]);

                /* test whether we can respond to the VF (do we have an address
                 * for it?)
                 */
                if (vf->state == VF_ACQUIRED) {
                        /* mbx_resp uses the op_rc of the VF */
                        vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;

                        /* notify the VF that we do not support this request */
                        bnx2x_vf_mbx_resp(bp, vf);
                } else {
                        /* can't send a response since this VF is unknown to us
                         * just unlock the channel and be done with it.
                         */
                        bnx2x_unlock_vf_pf_channel(bp, vf,
                                                   mbx->first_tlv.tl.type);
                }
        }
}

/* handle new vf-pf message */
void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
{
        struct bnx2x_virtf *vf;
        struct bnx2x_vf_mbx *mbx;
        u8 vf_idx;
        int rc;

        DP(BNX2X_MSG_IOV,
           "vf pf event received: vfid %d, address_hi %x, address lo %x",
           vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
        /* Sanity checks - consider removing later */

        /* check if the vf_id is valid */
        if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
            BNX2X_NR_VIRTFN(bp)) {
                BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
                          vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
                goto mbx_done;
        }
        vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
        mbx = BP_VF_MBX(bp, vf_idx);

        /* verify an event is not currently being processed -
         * debug failsafe only
         */
        if (mbx->flags & VF_MSG_INPROCESS) {
                BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
                          vfpf_event->vf_id);
                goto mbx_done;
        }
        vf = BP_VF(bp, vf_idx);

        /* save the VF message address */
        mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
        mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
        DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
           mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

        /* dmae to get the VF request */
        rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
                                  mbx->vf_addr_hi, mbx->vf_addr_lo,
                                  sizeof(union vfpf_tlvs)/4);
        if (rc) {
                BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
                goto mbx_error;
        }

        /* process the VF message header */
        mbx->first_tlv = mbx->msg->req.first_tlv;

        /* dispatch the request (will prepare the response) */
        bnx2x_vf_mbx_request(bp, vf, mbx);
        goto mbx_done;

mbx_error:
        bnx2x_vf_release(bp, vf, false); /* non blocking */
mbx_done:
        return;
}

/* propagate local bulletin board to vf */
int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
{
        struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
        dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
                             vf * BULLETIN_CONTENT_SIZE;
        dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
        u32 len = BULLETIN_CONTENT_SIZE;
        int rc;

        /* can only update vf after init took place */
        if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
            bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
                return 0;

        /* increment bulletin board version and compute crc */
        bulletin->version++;
        bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin);

        /* propagate bulletin board via dmae to vm memory */
        rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
                                  bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
                                  U64_LO(vf_addr), len/4);
        return rc;
}