1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8
9#include <linux/etherdevice.h>
10#include <linux/crc32.h>
11#include <linux/qed/qed_iov_if.h>
12#include "qed_cxt.h"
13#include "qed_hsi.h"
14#include "qed_hw.h"
15#include "qed_init_ops.h"
16#include "qed_int.h"
17#include "qed_mcp.h"
18#include "qed_reg_addr.h"
19#include "qed_sp.h"
20#include "qed_sriov.h"
21#include "qed_vf.h"
22
23/* IOV ramrods */
24static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
25{
26 struct vf_start_ramrod_data *p_ramrod = NULL;
27 struct qed_spq_entry *p_ent = NULL;
28 struct qed_sp_init_data init_data;
29 int rc = -EINVAL;
30	u8 fp_minor;
31
32 /* Get SPQ entry */
33 memset(&init_data, 0, sizeof(init_data));
34 init_data.cid = qed_spq_get_cid(p_hwfn);
35	init_data.opaque_fid = p_vf->opaque_fid;
36	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
37
38 rc = qed_sp_init_request(p_hwfn, &p_ent,
39 COMMON_RAMROD_VF_START,
40 PROTOCOLID_COMMON, &init_data);
41 if (rc)
42 return rc;
43
44 p_ramrod = &p_ent->ramrod.vf_start;
45
46	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
47 p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +030048
49	switch (p_hwfn->hw_info.personality) {
50 case QED_PCI_ETH:
51 p_ramrod->personality = PERSONALITY_ETH;
52 break;
53 case QED_PCI_ETH_ROCE:
54 p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
55 break;
56 default:
57 DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
58 p_hwfn->hw_info.personality);
59 return -EINVAL;
60 }
61
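	/* Negotiate the fastpath HSI minor version: if the VF asks for a
	 * newer minor than this PF supports (and it isn't the legacy
	 * NO_PKT_LEN_TUNN sentinel), fall back to the PF's minor below.
	 */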
62 fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
63	if (fp_minor > ETH_HSI_VER_MINOR &&
64 fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
65		DP_VERBOSE(p_hwfn,
66 QED_MSG_IOV,
67			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PF's version\n",
68 p_vf->abs_vf_id,
69 ETH_HSI_VER_MAJOR,
70 fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
71 fp_minor = ETH_HSI_VER_MINOR;
72 }
73
74	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
75	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
76
77 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
78 "VF[%d] - Starting using HSI %02x.%02x\n",
79 p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
80
81 return qed_spq_post(p_hwfn, p_ent, NULL);
82}
83
84static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
85 u32 concrete_vfid, u16 opaque_vfid)
86{
87 struct vf_stop_ramrod_data *p_ramrod = NULL;
88 struct qed_spq_entry *p_ent = NULL;
89 struct qed_sp_init_data init_data;
90 int rc = -EINVAL;
91
92 /* Get SPQ entry */
93 memset(&init_data, 0, sizeof(init_data));
94 init_data.cid = qed_spq_get_cid(p_hwfn);
95 init_data.opaque_fid = opaque_vfid;
96 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
97
98 rc = qed_sp_init_request(p_hwfn, &p_ent,
99 COMMON_RAMROD_VF_STOP,
100 PROTOCOLID_COMMON, &init_data);
101 if (rc)
102 return rc;
103
104 p_ramrod = &p_ent->ramrod.vf_stop;
105
106 p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
107
108 return qed_spq_post(p_hwfn, p_ent, NULL);
109}
110
111static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
112				  int rel_vf_id,
113				  bool b_enabled_only, bool b_non_malicious)
114{
115 if (!p_hwfn->pf_iov_info) {
116 DP_NOTICE(p_hwfn->cdev, "No iov info\n");
117 return false;
118 }
119
120 if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
121 (rel_vf_id < 0))
122 return false;
123
124 if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
125 b_enabled_only)
126 return false;
127
128	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
129 b_non_malicious)
130 return false;
131
132	return true;
133}
134
135static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
136 u16 relative_vf_id,
137 bool b_enabled_only)
138{
139 struct qed_vf_info *vf = NULL;
140
141 if (!p_hwfn->pf_iov_info) {
142 DP_NOTICE(p_hwfn->cdev, "No iov info\n");
143 return NULL;
144 }
145
146	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
147				  b_enabled_only, false))
148		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
149 else
150 DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
151 relative_vf_id);
152
153 return vf;
154}
155
156static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
157 struct qed_vf_info *p_vf, u16 rx_qid)
158{
159 if (rx_qid >= p_vf->num_rxqs)
160 DP_VERBOSE(p_hwfn,
161 QED_MSG_IOV,
162 "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
163 p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
164 return rx_qid < p_vf->num_rxqs;
165}
166
167static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
168 struct qed_vf_info *p_vf, u16 tx_qid)
169{
170 if (tx_qid >= p_vf->num_txqs)
171 DP_VERBOSE(p_hwfn,
172 QED_MSG_IOV,
173 "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
174 p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
175 return tx_qid < p_vf->num_txqs;
176}
177
178static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
179 struct qed_vf_info *p_vf, u16 sb_idx)
180{
181 int i;
182
183 for (i = 0; i < p_vf->num_sbs; i++)
184 if (p_vf->igu_sbs[i] == sb_idx)
185 return true;
186
187 DP_VERBOSE(p_hwfn,
188 QED_MSG_IOV,
189		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
190 p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
191
192 return false;
193}
194
195static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
196				    int vfid, struct qed_ptt *p_ptt)
197{
198 struct qed_bulletin_content *p_bulletin;
199 int crc_size = sizeof(p_bulletin->crc);
200 struct qed_dmae_params params;
201 struct qed_vf_info *p_vf;
202
203 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
204 if (!p_vf)
205 return -EINVAL;
206
207 if (!p_vf->vf_bulletin)
208 return -EINVAL;
209
210 p_bulletin = p_vf->bulletin.p_virt;
211
212 /* Increment bulletin board version and compute crc */
213 p_bulletin->version++;
214 p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
215 p_vf->bulletin.size - crc_size);
216
217 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
218 "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
219 p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
220
221 /* propagate bulletin board via dmae to vm memory */
222 memset(&params, 0, sizeof(params));
223 params.flags = QED_DMAE_FLAG_VF_DST;
224 params.dst_vfid = p_vf->abs_vf_id;
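	/* Note: qed_dmae_host2host() appears to take its length in 32-bit
	 * dwords, hence the bulletin size being divided by 4 below.
	 */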
225 return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
226 p_vf->vf_bulletin, p_vf->bulletin.size / 4,
227 &params);
228}
229
230static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
231{
232 struct qed_hw_sriov_info *iov = cdev->p_iov_info;
233 int pos = iov->pos;
234
235 DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
236 pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
237
238 pci_read_config_word(cdev->pdev,
239 pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
240 pci_read_config_word(cdev->pdev,
241 pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
242
243 pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
244 if (iov->num_vfs) {
245 DP_VERBOSE(cdev,
246 QED_MSG_IOV,
247			   "Number of VFs is already set to a non-zero value. Ignoring PCI configuration value\n");
248 iov->num_vfs = 0;
249 }
250
251 pci_read_config_word(cdev->pdev,
252 pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
253
254 pci_read_config_word(cdev->pdev,
255 pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
256
257 pci_read_config_word(cdev->pdev,
258 pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
259
260 pci_read_config_dword(cdev->pdev,
261 pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
262
263 pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
264
265 pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
266
267 DP_VERBOSE(cdev,
268 QED_MSG_IOV,
269 "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
270 iov->nres,
271 iov->cap,
272 iov->ctrl,
273 iov->total_vfs,
274 iov->initial_vfs,
275 iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
276
277 /* Some sanity checks */
278 if (iov->num_vfs > NUM_OF_VFS(cdev) ||
279 iov->total_vfs > NUM_OF_VFS(cdev)) {
280 /* This can happen only due to a bug. In this case we set
281 * num_vfs to zero to avoid memory corruption in the code that
282 * assumes max number of vfs
283 */
284 DP_NOTICE(cdev,
285 "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
286 iov->num_vfs);
287
288 iov->num_vfs = 0;
289 iov->total_vfs = 0;
290 }
291
292 return 0;
293}
294
295static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
296 struct qed_ptt *p_ptt)
297{
298 struct qed_igu_block *p_sb;
299 u16 sb_id;
300 u32 val;
301
302 if (!p_hwfn->hw_info.p_igu_info) {
303 DP_ERR(p_hwfn,
304 "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
305 return;
306 }
307
308 for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
309 sb_id++) {
310 p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
311 if ((p_sb->status & QED_IGU_STATUS_FREE) &&
312 !(p_sb->status & QED_IGU_STATUS_PF)) {
313 val = qed_rd(p_hwfn, p_ptt,
314 IGU_REG_MAPPING_MEMORY + sb_id * 4);
315 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
316 qed_wr(p_hwfn, p_ptt,
317 IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
318 }
319 }
320}
321
322static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
323{
324 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
325 struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
326 struct qed_bulletin_content *p_bulletin_virt;
327 dma_addr_t req_p, rply_p, bulletin_p;
328 union pfvf_tlvs *p_reply_virt_addr;
329 union vfpf_tlvs *p_req_virt_addr;
330 u8 idx = 0;
331
332 memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
333
334 p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
335 req_p = p_iov_info->mbx_msg_phys_addr;
336 p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
337 rply_p = p_iov_info->mbx_reply_phys_addr;
338 p_bulletin_virt = p_iov_info->p_bulletins;
339 bulletin_p = p_iov_info->bulletins_phys;
340 if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
341 DP_ERR(p_hwfn,
342 "qed_iov_setup_vfdb called without allocating mem first\n");
343 return;
344 }
345
346 for (idx = 0; idx < p_iov->total_vfs; idx++) {
347 struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
348 u32 concrete;
349
350 vf->vf_mbx.req_virt = p_req_virt_addr + idx;
351 vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
352 vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
353 vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
354
355 vf->state = VF_STOPPED;
356 vf->b_init = false;
357
358 vf->bulletin.phys = idx *
359 sizeof(struct qed_bulletin_content) +
360 bulletin_p;
361 vf->bulletin.p_virt = p_bulletin_virt + idx;
362 vf->bulletin.size = sizeof(struct qed_bulletin_content);
363
364 vf->relative_vf_id = idx;
365 vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
366 concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
367 vf->concrete_fid = concrete;
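		/* Opaque FID layout as built here: the PF's FID in the low
		 * byte, the absolute VF index in bits 8-15.
		 */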
368 vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
369 (vf->abs_vf_id << 8);
370 vf->vport_id = idx + 1;
371
372 vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
373 vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
374	}
375}
376
377static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
378{
379 struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
380 void **p_v_addr;
381 u16 num_vfs = 0;
382
383 num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
384
385 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
386 "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
387
388 /* Allocate PF Mailbox buffer (per-VF) */
389 p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
390 p_v_addr = &p_iov_info->mbx_msg_virt_addr;
391 *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
392 p_iov_info->mbx_msg_size,
393 &p_iov_info->mbx_msg_phys_addr,
394 GFP_KERNEL);
395 if (!*p_v_addr)
396 return -ENOMEM;
397
398 /* Allocate PF Mailbox Reply buffer (per-VF) */
399 p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
400 p_v_addr = &p_iov_info->mbx_reply_virt_addr;
401 *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
402 p_iov_info->mbx_reply_size,
403 &p_iov_info->mbx_reply_phys_addr,
404 GFP_KERNEL);
405 if (!*p_v_addr)
406 return -ENOMEM;
407
408 p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
409 num_vfs;
410 p_v_addr = &p_iov_info->p_bulletins;
411 *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
412 p_iov_info->bulletins_size,
413 &p_iov_info->bulletins_phys,
414 GFP_KERNEL);
415 if (!*p_v_addr)
416 return -ENOMEM;
417
418 DP_VERBOSE(p_hwfn,
419 QED_MSG_IOV,
420 "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
421 p_iov_info->mbx_msg_virt_addr,
422 (u64) p_iov_info->mbx_msg_phys_addr,
423 p_iov_info->mbx_reply_virt_addr,
424 (u64) p_iov_info->mbx_reply_phys_addr,
425 p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
426
427 return 0;
428}
429
430static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
431{
432 struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
433
434 if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
435 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
436 p_iov_info->mbx_msg_size,
437 p_iov_info->mbx_msg_virt_addr,
438 p_iov_info->mbx_msg_phys_addr);
439
440 if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
441 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
442 p_iov_info->mbx_reply_size,
443 p_iov_info->mbx_reply_virt_addr,
444 p_iov_info->mbx_reply_phys_addr);
445
446 if (p_iov_info->p_bulletins)
447 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
448 p_iov_info->bulletins_size,
449 p_iov_info->p_bulletins,
450 p_iov_info->bulletins_phys);
451}
452
453int qed_iov_alloc(struct qed_hwfn *p_hwfn)
454{
455 struct qed_pf_iov *p_sriov;
456
457 if (!IS_PF_SRIOV(p_hwfn)) {
458 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
459 "No SR-IOV - no need for IOV db\n");
460 return 0;
461 }
462
463 p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
464	if (!p_sriov)
465		return -ENOMEM;
466
467 p_hwfn->pf_iov_info = p_sriov;
468
469 return qed_iov_allocate_vfdb(p_hwfn);
470}
471
472void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
473{
474 if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
475 return;
476
477 qed_iov_setup_vfdb(p_hwfn);
478 qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
479}
480
481void qed_iov_free(struct qed_hwfn *p_hwfn)
482{
483 if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
484 qed_iov_free_vfdb(p_hwfn);
485 kfree(p_hwfn->pf_iov_info);
486 }
487}
488
489void qed_iov_free_hw_info(struct qed_dev *cdev)
490{
491 kfree(cdev->p_iov_info);
492 cdev->p_iov_info = NULL;
493}
494
495int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
496{
497 struct qed_dev *cdev = p_hwfn->cdev;
498 int pos;
499 int rc;
500
501	if (IS_VF(p_hwfn->cdev))
502 return 0;
503
504	/* Learn the PCI configuration */
505 pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
506 PCI_EXT_CAP_ID_SRIOV);
507 if (!pos) {
508 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
509 return 0;
510 }
511
512 /* Allocate a new struct for IOV information */
513 cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
514	if (!cdev->p_iov_info)
515		return -ENOMEM;
516
517	cdev->p_iov_info->pos = pos;
518
519 rc = qed_iov_pci_cfg_info(cdev);
520 if (rc)
521 return rc;
522
523	/* We want PF IOV to be synonymous with the existence of p_iov_info;
524 * In case the capability is published but there are no VFs, simply
525 * de-allocate the struct.
526 */
527 if (!cdev->p_iov_info->total_vfs) {
528 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
529 "IOV capabilities, but no VFs are published\n");
530 kfree(cdev->p_iov_info);
531 cdev->p_iov_info = NULL;
532 return 0;
533 }
534
535 /* Calculate the first VF index - this is a bit tricky; Basically,
536 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
537 * after the first engine's VFs.
538 */
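	/* Illustrative example (hypothetical values): with an offset of 16
	 * and abs_pf_id 0 on the first engine, first_vf_in_pf works out to 0.
	 */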
539 cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
540 p_hwfn->abs_pf_id - 16;
541 if (QED_PATH_ID(p_hwfn))
542 cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
543
544 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
545 "First VF in hwfn 0x%08x\n",
546 cdev->p_iov_info->first_vf_in_pf);
547
548 return 0;
549}
550
551bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
552			      int vfid, bool b_fail_malicious)
553{
554 /* Check PF supports sriov */
555	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
556	    !IS_PF_SRIOV_ALLOC(p_hwfn))
557		return false;
558
559 /* Check VF validity */
560	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
561		return false;
562
563 return true;
564}
565
566bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
567{
568 return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
569}
570
571static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
572 u16 rel_vf_id, u8 to_disable)
573{
574 struct qed_vf_info *vf;
575 int i;
576
577 for_each_hwfn(cdev, i) {
578 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
579
580 vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
581 if (!vf)
582 continue;
583
584 vf->to_disable = to_disable;
585 }
586}
587
588static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
589{
590 u16 i;
591
592 if (!IS_QED_SRIOV(cdev))
593 return;
594
595 for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
596 qed_iov_set_vf_to_disable(cdev, i, to_disable);
597}
598
599static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
600 struct qed_ptt *p_ptt, u8 abs_vfid)
601{
602 qed_wr(p_hwfn, p_ptt,
603 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
604 1 << (abs_vfid & 0x1f));
605}
606
607static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
608 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
609{
610	int i;
611
612 /* Set VF masks and configuration - pretend */
613 qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
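	/* While pretending, the register accesses below are presumably issued
	 * on behalf of the VF's concrete FID, so they target the VF's own
	 * IGU context rather than the PF's.
	 */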
614
615 qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
616
617	/* unpretend */
618 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
619
620 /* iterate over all queues, clear sb consumer */
621	for (i = 0; i < vf->num_sbs; i++)
622 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
623 vf->igu_sbs[i],
624 vf->opaque_fid, true);
625}
626
627static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
628 struct qed_ptt *p_ptt,
629 struct qed_vf_info *vf, bool enable)
630{
631 u32 igu_vf_conf;
632
633 qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
634
635 igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
636
637 if (enable)
638 igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
639 else
640 igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
641
642 qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
643
644 /* unpretend */
645 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
646}
647
648static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
649 struct qed_ptt *p_ptt,
650 struct qed_vf_info *vf)
651{
652 u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
653 int rc;
654
655	if (vf->to_disable)
656 return 0;
657
658	DP_VERBOSE(p_hwfn,
659 QED_MSG_IOV,
660 "Enable internal access for vf %x [abs %x]\n",
661 vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));
662
663 qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));
664
665	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
666
667	/* It's possible VF was previously considered malicious */
668 vf->b_malicious = false;
669
670	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
671 if (rc)
672 return rc;
673
674 qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
675
676 SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
677 STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
678
679 qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
680 p_hwfn->hw_info.hw_mode);
681
682 /* unpretend */
683 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
684
685	vf->state = VF_FREE;
686
687 return rc;
688}
689
690/**
691 * @brief qed_iov_config_perm_table - configure the permission
692 * zone table.
693 * In E4, queue zone permission table size is 320x9. There
694 * are 320 VF queues for single engine device (256 for dual
695 * engine device), and each entry has the following format:
696 * {Valid, VF[7:0]}
697 * @param p_hwfn
698 * @param p_ptt
699 * @param vf
700 * @param enable
701 */
702static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
703 struct qed_ptt *p_ptt,
704 struct qed_vf_info *vf, u8 enable)
705{
706 u32 reg_addr, val;
707 u16 qzone_id = 0;
708 int qid;
709
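	/* Each queue-zone entry written below encodes {valid, VF[7:0]}:
	 * bit 8 is the valid bit, bits 7:0 carry the absolute VF id,
	 * matching the layout described in the comment above.
	 */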
710 for (qid = 0; qid < vf->num_rxqs; qid++) {
711 qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
712 &qzone_id);
713
714 reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
715		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
716		qed_wr(p_hwfn, p_ptt, reg_addr, val);
717 }
718}
719
720static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
721 struct qed_ptt *p_ptt,
722 struct qed_vf_info *vf)
723{
724 /* Reset vf in IGU - interrupts are still disabled */
725 qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
726
727 qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
728
729 /* Permission Table */
730 qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
731}
732
733static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
734 struct qed_ptt *p_ptt,
735 struct qed_vf_info *vf, u16 num_rx_queues)
736{
737 struct qed_igu_block *igu_blocks;
738 int qid = 0, igu_id = 0;
739 u32 val = 0;
740
741 igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
742
743 if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
744 num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
745 p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
746
747 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
748 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
749 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
750
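	/* Walk the IGU CAM: claim free, non-PF blocks for the VF, program
	 * the mapping line, and mirror the entry into CAU via DMAE.
	 */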
751 while ((qid < num_rx_queues) &&
752 (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
753 if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
754 struct cau_sb_entry sb_entry;
755
756 vf->igu_sbs[qid] = (u16)igu_id;
757 igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;
758
759 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
760
761 qed_wr(p_hwfn, p_ptt,
762 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
763 val);
764
765 /* Configure igu sb in CAU which were marked valid */
766 qed_init_cau_sb_entry(p_hwfn, &sb_entry,
767 p_hwfn->rel_pf_id,
768 vf->abs_vf_id, 1);
769 qed_dmae_host2grc(p_hwfn, p_ptt,
770 (u64)(uintptr_t)&sb_entry,
771 CAU_REG_SB_VAR_MEMORY +
772 igu_id * sizeof(u64), 2, 0);
773 qid++;
774 }
775 igu_id++;
776 }
777
778 vf->num_sbs = (u8) num_rx_queues;
779
780 return vf->num_sbs;
781}
782
783static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
784 struct qed_ptt *p_ptt,
785 struct qed_vf_info *vf)
786{
787 struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
788 int idx, igu_id;
789 u32 addr, val;
790
791 /* Invalidate igu CAM lines and mark them as free */
792 for (idx = 0; idx < vf->num_sbs; idx++) {
793 igu_id = vf->igu_sbs[idx];
794 addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
795
796 val = qed_rd(p_hwfn, p_ptt, addr);
797 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
798 qed_wr(p_hwfn, p_ptt, addr, val);
799
800 p_info->igu_map.igu_blocks[igu_id].status |=
801 QED_IGU_STATUS_FREE;
802
803 p_hwfn->hw_info.p_igu_info->free_blks++;
804 }
805
806 vf->num_sbs = 0;
807}
808
809static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
810 struct qed_ptt *p_ptt,
811 u16 rel_vf_id, u16 num_rx_queues)
812{
813 u8 num_of_vf_avaiable_chains = 0;
814 struct qed_vf_info *vf = NULL;
815 int rc = 0;
816 u32 cids;
817 u8 i;
818
819 vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
820 if (!vf) {
821 DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
822 return -EINVAL;
823 }
824
825 if (vf->b_init) {
826 DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
827 return -EINVAL;
828 }
829
830 /* Limit number of queues according to number of CIDs */
831 qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
832 DP_VERBOSE(p_hwfn,
833 QED_MSG_IOV,
834 "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
835 vf->relative_vf_id, num_rx_queues, (u16) cids);
836 num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));
837
838 num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
839 p_ptt,
840 vf,
841 num_rx_queues);
842 if (!num_of_vf_avaiable_chains) {
843 DP_ERR(p_hwfn, "no available igu sbs\n");
844 return -ENOMEM;
845 }
846
847 /* Choose queue number and index ranges */
848 vf->num_rxqs = num_of_vf_avaiable_chains;
849 vf->num_txqs = num_of_vf_avaiable_chains;
850
851 for (i = 0; i < vf->num_rxqs; i++) {
852 u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
853 vf->igu_sbs[i]);
854
855 if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
856 DP_NOTICE(p_hwfn,
857 "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
858 vf->relative_vf_id, queue_id);
859 return -EINVAL;
860 }
861
862 /* CIDs are per-VF, so no problem having them 0-based. */
863 vf->vf_queues[i].fw_rx_qid = queue_id;
864 vf->vf_queues[i].fw_tx_qid = queue_id;
865 vf->vf_queues[i].fw_cid = i;
866
867 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
868 "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
869 vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
870 }
871 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
872 if (!rc) {
873 vf->b_init = true;
874
875 if (IS_LEAD_HWFN(p_hwfn))
876 p_hwfn->cdev->p_iov_info->num_vfs++;
877 }
878
879 return rc;
880}
881
882static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
883 u16 vfid,
884 struct qed_mcp_link_params *params,
885 struct qed_mcp_link_state *link,
886 struct qed_mcp_link_capabilities *p_caps)
887{
888 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
889 vfid,
890 false);
891 struct qed_bulletin_content *p_bulletin;
892
893 if (!p_vf)
894 return;
895
896 p_bulletin = p_vf->bulletin.p_virt;
897 p_bulletin->req_autoneg = params->speed.autoneg;
898 p_bulletin->req_adv_speed = params->speed.advertised_speeds;
899 p_bulletin->req_forced_speed = params->speed.forced_speed;
900 p_bulletin->req_autoneg_pause = params->pause.autoneg;
901 p_bulletin->req_forced_rx = params->pause.forced_rx;
902 p_bulletin->req_forced_tx = params->pause.forced_tx;
903 p_bulletin->req_loopback = params->loopback_mode;
904
905 p_bulletin->link_up = link->link_up;
906 p_bulletin->speed = link->speed;
907 p_bulletin->full_duplex = link->full_duplex;
908 p_bulletin->autoneg = link->an;
909 p_bulletin->autoneg_complete = link->an_complete;
910 p_bulletin->parallel_detection = link->parallel_detection;
911 p_bulletin->pfc_enabled = link->pfc_enabled;
912 p_bulletin->partner_adv_speed = link->partner_adv_speed;
913 p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
914 p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
915 p_bulletin->partner_adv_pause = link->partner_adv_pause;
916 p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
917
918 p_bulletin->capability_speed = p_caps->speed_capabilities;
919}
920
921static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
922 struct qed_ptt *p_ptt, u16 rel_vf_id)
923{
924	struct qed_mcp_link_capabilities caps;
925 struct qed_mcp_link_params params;
926 struct qed_mcp_link_state link;
927	struct qed_vf_info *vf = NULL;
928
929 vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
930 if (!vf) {
931 DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
932 return -EINVAL;
933 }
934
935	if (vf->bulletin.p_virt)
936 memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));
937
938 memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
939
940	/* Get the link configuration back in bulletin so
941 * that when VFs are re-enabled they get the actual
942 * link configuration.
943 */
944 memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
945 memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
946 memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
947 qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
948
949	/* Forget the VF's acquisition message */
950	memset(&vf->acquire, 0, sizeof(vf->acquire));
951
952	/* Disabling interrupts and resetting the permission table were done during
953 * vf-close, however, we could get here without going through vf_close
954 */
955 /* Disable Interrupts for VF */
956 qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
957
958 /* Reset Permission table */
959 qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
960
961 vf->num_rxqs = 0;
962 vf->num_txqs = 0;
963 qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
964
965 if (vf->b_init) {
966 vf->b_init = false;
967
968 if (IS_LEAD_HWFN(p_hwfn))
969 p_hwfn->cdev->p_iov_info->num_vfs--;
970 }
971
972 return 0;
973}
974
975static bool qed_iov_tlv_supported(u16 tlvtype)
976{
977 return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
978}
979
980/* place a given tlv on the tlv buffer, continuing current tlv list */
981void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
982{
983 struct channel_tlv *tl = (struct channel_tlv *)*offset;
984
985 tl->type = type;
986 tl->length = length;
987
988 /* Offset should keep pointing to next TLV (the end of the last) */
989 *offset += length;
990
991 /* Return a pointer to the start of the added tlv */
992 return *offset - length;
993}
994
995/* list the types and lengths of the tlvs on the buffer */
996void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
997{
998 u16 i = 1, total_length = 0;
999 struct channel_tlv *tlv;
1000
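	/* Walk the TLV chain until LIST_END; a zero-length entry or a total
	 * length overflowing the fixed channel buffer aborts the walk, which
	 * guards against malformed input from a malicious VF.
	 */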
1001 do {
1002 tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
1003
1004 /* output tlv */
1005 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1006 "TLV number %d: type %d, length %d\n",
1007 i, tlv->type, tlv->length);
1008
1009 if (tlv->type == CHANNEL_TLV_LIST_END)
1010 return;
1011
1012 /* Validate entry - protect against malicious VFs */
1013 if (!tlv->length) {
1014 DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
1015 return;
1016 }
1017
1018 total_length += tlv->length;
1019
1020 if (total_length >= sizeof(struct tlv_buffer_size)) {
1021 DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
1022 return;
1023 }
1024
1025 i++;
1026 } while (1);
1027}
1028
1029static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
1030 struct qed_ptt *p_ptt,
1031 struct qed_vf_info *p_vf,
1032 u16 length, u8 status)
1033{
1034 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1035 struct qed_dmae_params params;
1036 u8 eng_vf_id;
1037
1038 mbx->reply_virt->default_resp.hdr.status = status;
1039
1040 qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
1041
1042 eng_vf_id = p_vf->abs_vf_id;
1043
1044 memset(&params, 0, sizeof(struct qed_dmae_params));
1045 params.flags = QED_DMAE_FLAG_VF_DST;
1046 params.dst_vfid = eng_vf_id;
1047
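	/* Copy the reply body (everything past the first 8 bytes) first and
	 * the leading 8 bytes - which carry the status header - last, so the
	 * VF should never observe a partially written response. Lengths are
	 * presumably in dwords, hence the divisions by 4.
	 */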
1048 qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
1049 mbx->req_virt->first_tlv.reply_address +
1050 sizeof(u64),
1051 (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
1052 &params);
1053
1054 qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
1055 mbx->req_virt->first_tlv.reply_address,
1056 sizeof(u64) / 4, &params);
1057
1058 REG_WR(p_hwfn,
1059 GTT_BAR0_MAP_REG_USDM_RAM +
1060 USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
1061}
1062
1063static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
1064 enum qed_iov_vport_update_flag flag)
1065{
1066 switch (flag) {
1067 case QED_IOV_VP_UPDATE_ACTIVATE:
1068 return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
1069	case QED_IOV_VP_UPDATE_VLAN_STRIP:
1070 return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
1071 case QED_IOV_VP_UPDATE_TX_SWITCH:
1072 return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
1073	case QED_IOV_VP_UPDATE_MCAST:
1074 return CHANNEL_TLV_VPORT_UPDATE_MCAST;
1075 case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
1076 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1077 case QED_IOV_VP_UPDATE_RSS:
1078 return CHANNEL_TLV_VPORT_UPDATE_RSS;
1079	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
1080 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
1081 case QED_IOV_VP_UPDATE_SGE_TPA:
1082 return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
1083	default:
1084 return 0;
1085 }
1086}
1087
1088static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
1089 struct qed_vf_info *p_vf,
1090 struct qed_iov_vf_mbx *p_mbx,
1091 u8 status,
1092 u16 tlvs_mask, u16 tlvs_accepted)
1093{
1094 struct pfvf_def_resp_tlv *resp;
1095 u16 size, total_len, i;
1096
1097 memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
1098 p_mbx->offset = (u8 *)p_mbx->reply_virt;
1099 size = sizeof(struct pfvf_def_resp_tlv);
1100 total_len = size;
1101
1102 qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
1103
1104 /* Prepare response for all extended tlvs if they are found by PF */
1105 for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
1106		if (!(tlvs_mask & BIT(i)))
1107			continue;
1108
1109 resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
1110 qed_iov_vport_to_tlv(p_hwfn, i), size);
1111
1112		if (tlvs_accepted & BIT(i))
1113			resp->hdr.status = status;
1114 else
1115 resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
1116
1117 DP_VERBOSE(p_hwfn,
1118 QED_MSG_IOV,
1119 "VF[%d] - vport_update response: TLV %d, status %02x\n",
1120 p_vf->relative_vf_id,
1121 qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
1122
1123 total_len += size;
1124 }
1125
1126 qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
1127 sizeof(struct channel_list_end_tlv));
1128
1129 return total_len;
1130}
1131
1132static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
1133 struct qed_ptt *p_ptt,
1134 struct qed_vf_info *vf_info,
1135 u16 type, u16 length, u8 status)
1136{
1137 struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;
1138
1139 mbx->offset = (u8 *)mbx->reply_virt;
1140
1141 qed_add_tlv(p_hwfn, &mbx->offset, type, length);
1142 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1143 sizeof(struct channel_list_end_tlv));
1144
1145 qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
1146}
1147
1148static struct
1149qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
1150 u16 relative_vf_id,
1151 bool b_enabled_only)
1152{
1153 struct qed_vf_info *vf = NULL;
1154
1155 vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
1156 if (!vf)
1157 return NULL;
1158
1159 return &vf->p_vf_info;
1160}
1161
1162static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
1163{
1164 struct qed_public_vf_info *vf_info;
1165
1166 vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
1167
1168 if (!vf_info)
1169 return;
1170
1171 /* Clear the VF mac */
1172 memset(vf_info->mac, 0, ETH_ALEN);
1173}
1174
1175static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
1176 struct qed_vf_info *p_vf)
1177{
1178 u32 i;
1179
1180 p_vf->vf_bulletin = 0;
1181	p_vf->vport_instance = 0;
1182	p_vf->configured_features = 0;
1183
1184 /* If VF previously requested less resources, go back to default */
1185 p_vf->num_rxqs = p_vf->num_sbs;
1186 p_vf->num_txqs = p_vf->num_sbs;
1187
1188	p_vf->num_active_rxqs = 0;
1189
1190	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
1191 p_vf->vf_queues[i].rxq_active = 0;
1192
1193	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
1194	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
1195	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
1196}
1197
1198static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
1199 struct qed_ptt *p_ptt,
1200 struct qed_vf_info *p_vf,
1201 struct vf_pf_resc_request *p_req,
1202 struct pf_vf_resc *p_resp)
1203{
1204 int i;
1205
1206 /* Queue related information */
1207 p_resp->num_rxqs = p_vf->num_rxqs;
1208 p_resp->num_txqs = p_vf->num_txqs;
1209 p_resp->num_sbs = p_vf->num_sbs;
1210
1211 for (i = 0; i < p_resp->num_sbs; i++) {
1212 p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
1213 p_resp->hw_sbs[i].sb_qid = 0;
1214 }
1215
1216 /* These fields are filled for backward compatibility.
1217 * Unused by modern vfs.
1218 */
1219 for (i = 0; i < p_resp->num_rxqs; i++) {
1220 qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
1221 (u16 *)&p_resp->hw_qid[i]);
1222 p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
1223 }
1224
1225 /* Filter related information */
1226 p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
1227 p_req->num_mac_filters);
1228 p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
1229 p_req->num_vlan_filters);
1230
1231 /* This isn't really needed/enforced, but some legacy VFs might depend
1232 * on the correct filling of this field.
1233 */
1234 p_resp->num_mc_filters = QED_MAX_MC_ADDRS;
1235
1236 /* Validate sufficient resources for VF */
1237 if (p_resp->num_rxqs < p_req->num_rxqs ||
1238 p_resp->num_txqs < p_req->num_txqs ||
1239 p_resp->num_sbs < p_req->num_sbs ||
1240 p_resp->num_mac_filters < p_req->num_mac_filters ||
1241 p_resp->num_vlan_filters < p_req->num_vlan_filters ||
1242 p_resp->num_mc_filters < p_req->num_mc_filters) {
1243 DP_VERBOSE(p_hwfn,
1244 QED_MSG_IOV,
1245 "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
1246 p_vf->abs_vf_id,
1247 p_req->num_rxqs,
1248 p_resp->num_rxqs,
1249 p_req->num_rxqs,
1250 p_resp->num_txqs,
1251 p_req->num_sbs,
1252 p_resp->num_sbs,
1253 p_req->num_mac_filters,
1254 p_resp->num_mac_filters,
1255 p_req->num_vlan_filters,
1256 p_resp->num_vlan_filters,
1257 p_req->num_mc_filters, p_resp->num_mc_filters);
1258
1259 /* Some legacy OSes are incapable of correctly handling this
1260 * failure.
1261 */
1262 if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1263 ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
1264 (p_vf->acquire.vfdev_info.os_type ==
1265 VFPF_ACQUIRE_OS_WINDOWS))
1266 return PFVF_STATUS_SUCCESS;
1267
1268		return PFVF_STATUS_NO_RESOURCE;
1269 }
1270
1271 return PFVF_STATUS_SUCCESS;
1272}
1273
1274static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
1275 struct pfvf_stats_info *p_stats)
1276{
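	/* Point the VF at its per-queue statistics areas in the M/U/P-storm
	 * RAM zones; Tstorm has no per-queue VF statistics, so it is left
	 * zeroed.
	 */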
1277 p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
1278 offsetof(struct mstorm_vf_zone,
1279 non_trigger.eth_queue_stat);
1280 p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
1281 p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
1282 offsetof(struct ustorm_vf_zone,
1283 non_trigger.eth_queue_stat);
1284 p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
1285 p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
1286 offsetof(struct pstorm_vf_zone,
1287 non_trigger.eth_queue_stat);
1288 p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
1289 p_stats->tstats.address = 0;
1290 p_stats->tstats.len = 0;
1291}
1292
1293static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
1294 struct qed_ptt *p_ptt,
1295 struct qed_vf_info *vf)
1296{
1297	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1298 struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1299 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1300 struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1301	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1302	struct pf_vf_resc *resc = &resp->resc;
1303	int rc;
1304
1305 memset(resp, 0, sizeof(*resp));
1306
1307	/* Write the PF version so that VF would know which version
1308	 * is supported - might be later overridden. This guarantees that
1309 * VF could recognize legacy PF based on lack of versions in reply.
1310 */
1311 pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1312 pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1313
1314	if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
1315 DP_VERBOSE(p_hwfn,
1316 QED_MSG_IOV,
1317 "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
1318 vf->abs_vf_id, vf->state);
1319 goto out;
1320 }
1321
1322	/* Validate FW compatibility */
1323	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1324		if (req->vfdev_info.capabilities &
1325 VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
1326 struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
1327
1328			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1329 "VF[%d] is pre-fastpath HSI\n",
1330 vf->abs_vf_id);
1331 p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
1332 p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1333 } else {
1334 DP_INFO(p_hwfn,
1335				"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
1336 vf->abs_vf_id,
1337 req->vfdev_info.eth_fp_hsi_major,
1338 req->vfdev_info.eth_fp_hsi_minor,
1339 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1340
1341 goto out;
1342 }
1343	}
1344
1345 /* On 100g PFs, prevent old VFs from loading */
1346 if ((p_hwfn->cdev->num_hwfns > 1) &&
1347 !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1348 DP_INFO(p_hwfn,
1349 "VF[%d] is running an old driver that doesn't support 100g\n",
1350 vf->abs_vf_id);
1351		goto out;
1352 }
1353
1354	/* Store the acquire message */
1355 memcpy(&vf->acquire, req, sizeof(vf->acquire));
1356
1357	vf->opaque_fid = req->vfdev_info.opaque_fid;
1358
1359 vf->vf_bulletin = req->bulletin_addr;
1360 vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1361 vf->bulletin.size : req->bulletin_size;
1362
1363 /* fill in pfdev info */
1364 pfdev_info->chip_num = p_hwfn->cdev->chip_num;
1365 pfdev_info->db_size = 0;
1366 pfdev_info->indices_per_sb = PIS_PER_SB;
1367
1368 pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1369 PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1370 if (p_hwfn->cdev->num_hwfns > 1)
1371 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1372
1373	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
1374
1375 memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
1376
1377 pfdev_info->fw_major = FW_MAJOR_VERSION;
1378 pfdev_info->fw_minor = FW_MINOR_VERSION;
1379 pfdev_info->fw_rev = FW_REVISION_VERSION;
1380 pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1381
1382 /* Incorrect when legacy, but doesn't matter as legacy isn't reading
1383 * this field.
1384 */
1385	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
1386					 req->vfdev_info.eth_fp_hsi_minor);
1387	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
1388 qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
1389
1390 pfdev_info->dev_type = p_hwfn->cdev->type;
1391 pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
1392
1393	/* Fill resources available to VF; Make sure there are enough to
1394	 * satisfy the VF's request.
1395	 */
1396	vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1397 &req->resc_request, resc);
1398 if (vfpf_status != PFVF_STATUS_SUCCESS)
1399 goto out;
1400
1401	/* Start the VF in FW */
1402 rc = qed_sp_vf_start(p_hwfn, vf);
1403 if (rc) {
1404 DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
1405 vfpf_status = PFVF_STATUS_FAILURE;
1406 goto out;
1407 }
1408
1409	/* Fill agreed size of bulletin board in response */
1410	resp->bulletin_size = vf->bulletin.size;
1411	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
1412
1413 DP_VERBOSE(p_hwfn,
1414 QED_MSG_IOV,
1415 "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
1416 "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
1417 vf->abs_vf_id,
1418 resp->pfdev_info.chip_num,
1419 resp->pfdev_info.db_size,
1420 resp->pfdev_info.indices_per_sb,
1421 resp->pfdev_info.capabilities,
1422 resc->num_rxqs,
1423 resc->num_txqs,
1424 resc->num_sbs,
1425 resc->num_mac_filters,
1426 resc->num_vlan_filters);
1427 vf->state = VF_ACQUIRED;
1428
1429 /* Prepare Response */
1430out:
1431 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1432 sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
1433}
1434
1435static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
1436 struct qed_vf_info *p_vf, bool val)
1437{
1438 struct qed_sp_vport_update_params params;
1439 int rc;
1440
1441 if (val == p_vf->spoof_chk) {
1442 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1443 "Spoofchk value[%d] is already configured\n", val);
1444 return 0;
1445 }
1446
1447 memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
1448 params.opaque_fid = p_vf->opaque_fid;
1449 params.vport_id = p_vf->vport_id;
1450 params.update_anti_spoofing_en_flg = 1;
1451 params.anti_spoofing_en = val;
1452
1453 rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
1454	if (!rc) {
1455		p_vf->spoof_chk = val;
1456 p_vf->req_spoofchk_val = p_vf->spoof_chk;
1457 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1458 "Spoofchk val[%d] configured\n", val);
1459 } else {
1460 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1461 "Spoofchk configuration[val:%d] failed for VF[%d]\n",
1462 val, p_vf->relative_vf_id);
1463 }
1464
1465 return rc;
1466}
1467
1468static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
1469 struct qed_vf_info *p_vf)
1470{
1471 struct qed_filter_ucast filter;
1472 int rc = 0;
1473 int i;
1474
1475 memset(&filter, 0, sizeof(filter));
1476 filter.is_rx_filter = 1;
1477 filter.is_tx_filter = 1;
1478 filter.vport_to_add_to = p_vf->vport_id;
1479 filter.opcode = QED_FILTER_ADD;
1480
1481 /* Reconfigure vlans */
1482 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
1483 if (!p_vf->shadow_config.vlans[i].used)
1484 continue;
1485
1486 filter.type = QED_FILTER_VLAN;
1487 filter.vlan = p_vf->shadow_config.vlans[i].vid;
1488		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1489			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
1490			   filter.vlan, p_vf->relative_vf_id);
1491		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1492					     &filter, QED_SPQ_MODE_CB, NULL);
1493		if (rc) {
1494 DP_NOTICE(p_hwfn,
1495 "Failed to configure VLAN [%04x] to VF [%04x]\n",
1496 filter.vlan, p_vf->relative_vf_id);
1497 break;
1498 }
1499 }
1500
1501 return rc;
1502}
1503
1504static int
1505qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
1506 struct qed_vf_info *p_vf, u64 events)
1507{
1508 int rc = 0;
1509
1510	if ((events & BIT(VLAN_ADDR_FORCED)) &&
1511	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
1512 rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
1513
1514 return rc;
1515}
1516
1517static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
1518 struct qed_vf_info *p_vf, u64 events)
1519{
1520 int rc = 0;
1521 struct qed_filter_ucast filter;
1522
1523 if (!p_vf->vport_instance)
1524 return -EINVAL;
1525
1526	if (events & BIT(MAC_ADDR_FORCED)) {
1527		/* Since there's no way [currently] of removing the MAC,
1528 * we can always assume this means we need to force it.
1529 */
1530 memset(&filter, 0, sizeof(filter));
1531 filter.type = QED_FILTER_MAC;
1532 filter.opcode = QED_FILTER_REPLACE;
1533 filter.is_rx_filter = 1;
1534 filter.is_tx_filter = 1;
1535 filter.vport_to_add_to = p_vf->vport_id;
1536 ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
1537
1538 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1539 &filter, QED_SPQ_MODE_CB, NULL);
1540 if (rc) {
1541 DP_NOTICE(p_hwfn,
1542 "PF failed to configure MAC for VF\n");
1543 return rc;
1544 }
1545
1546 p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
1547 }
1548
1549	if (events & BIT(VLAN_ADDR_FORCED)) {
1550		struct qed_sp_vport_update_params vport_update;
1551 u8 removal;
1552 int i;
1553
1554 memset(&filter, 0, sizeof(filter));
1555 filter.type = QED_FILTER_VLAN;
1556 filter.is_rx_filter = 1;
1557 filter.is_tx_filter = 1;
1558 filter.vport_to_add_to = p_vf->vport_id;
1559 filter.vlan = p_vf->bulletin.p_virt->pvid;
1560 filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
1561 QED_FILTER_FLUSH;
1562
1563 /* Send the ramrod */
1564 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1565 &filter, QED_SPQ_MODE_CB, NULL);
1566 if (rc) {
1567 DP_NOTICE(p_hwfn,
1568 "PF failed to configure VLAN for VF\n");
1569 return rc;
1570 }
1571
1572 /* Update the default-vlan & silent vlan stripping */
1573 memset(&vport_update, 0, sizeof(vport_update));
1574 vport_update.opaque_fid = p_vf->opaque_fid;
1575 vport_update.vport_id = p_vf->vport_id;
1576 vport_update.update_default_vlan_enable_flg = 1;
1577 vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
1578 vport_update.update_default_vlan_flg = 1;
1579 vport_update.default_vlan = filter.vlan;
1580
1581 vport_update.update_inner_vlan_removal_flg = 1;
1582 removal = filter.vlan ? 1
1583 : p_vf->shadow_config.inner_vlan_removal;
1584 vport_update.inner_vlan_removal_flg = removal;
1585 vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
1586 rc = qed_sp_vport_update(p_hwfn,
1587 &vport_update,
1588 QED_SPQ_MODE_EBLOCK, NULL);
1589 if (rc) {
1590 DP_NOTICE(p_hwfn,
1591 "PF failed to configure VF vport for vlan\n");
1592 return rc;
1593 }
1594
1595 /* Update all the Rx queues */
1596 for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
1597 u16 qid;
1598
1599 if (!p_vf->vf_queues[i].rxq_active)
1600 continue;
1601
1602 qid = p_vf->vf_queues[i].fw_rx_qid;
1603
1604 rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
1605 1, 0, 1,
1606 QED_SPQ_MODE_EBLOCK,
1607 NULL);
1608 if (rc) {
1609 DP_NOTICE(p_hwfn,
1610					  "Failed to send Rx update for queue[0x%04x]\n",
1611 qid);
1612 return rc;
1613 }
1614 }
1615
1616 if (filter.vlan)
1617 p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
1618 else
1619			p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
1620	}
1621
1622 /* If forced features are terminated, we need to configure the shadow
1623 * configuration back again.
1624 */
1625 if (events)
1626 qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
1627
1628 return rc;
1629}
1630
1631static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
1632 struct qed_ptt *p_ptt,
1633 struct qed_vf_info *vf)
1634{
1635 struct qed_sp_vport_start_params params = { 0 };
1636 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1637 struct vfpf_vport_start_tlv *start;
1638 u8 status = PFVF_STATUS_SUCCESS;
1639 struct qed_vf_info *vf_info;
Yuval Mintz08feecd2016-05-11 16:36:20 +03001640 u64 *p_bitmap;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001641 int sb_id;
1642 int rc;
1643
1644 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
1645 if (!vf_info) {
1646 DP_NOTICE(p_hwfn->cdev,
1647 "Failed to get VF info, invalid vfid [%d]\n",
1648 vf->relative_vf_id);
1649 return;
1650 }
1651
1652 vf->state = VF_ENABLED;
1653 start = &mbx->req_virt->start_vport;
1654
1655 /* Initialize Status block in CAU */
1656 for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
1657 if (!start->sb_addr[sb_id]) {
1658 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1659 "VF[%d] did not fill the address of SB %d\n",
1660 vf->relative_vf_id, sb_id);
1661 break;
1662 }
1663
1664 qed_int_cau_conf_sb(p_hwfn, p_ptt,
1665 start->sb_addr[sb_id],
Yuval Mintz1a635e42016-08-15 10:42:43 +03001666 vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001667 }
1668 qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
1669
1670 vf->mtu = start->mtu;
Yuval Mintz08feecd2016-05-11 16:36:20 +03001671 vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
1672
1673 /* Take into consideration configuration forced by hypervisor;
1674 * If none is configured, use the supplied VF values [for old
1675 * vfs that would still be fine, since they passed '0' as padding].
1676 */
1677 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
Yuval Mintz1a635e42016-08-15 10:42:43 +03001678 if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
Yuval Mintz08feecd2016-05-11 16:36:20 +03001679 u8 vf_req = start->only_untagged;
1680
1681 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
1682 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
1683 }
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001684
1685 params.tpa_mode = start->tpa_mode;
1686 params.remove_inner_vlan = start->inner_vlan_removal;
Yuval Mintz831bfb0e2016-05-11 16:36:25 +03001687 params.tx_switching = true;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001688
Yuval Mintz08feecd2016-05-11 16:36:20 +03001689 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001690 params.drop_ttl0 = false;
1691 params.concrete_fid = vf->concrete_fid;
1692 params.opaque_fid = vf->opaque_fid;
1693 params.vport_id = vf->vport_id;
1694 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1695 params.mtu = vf->mtu;
Yuval Mintz11a85d72016-08-22 13:25:10 +03001696 params.check_mac = true;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001697
1698 rc = qed_sp_eth_vport_start(p_hwfn, &params);
Yuval Mintz1a635e42016-08-15 10:42:43 +03001699 if (rc) {
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001700 DP_ERR(p_hwfn,
1701 "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
1702 status = PFVF_STATUS_FAILURE;
1703 } else {
1704 vf->vport_instance++;
Yuval Mintz08feecd2016-05-11 16:36:20 +03001705
1706 /* Force configuration if needed on the newly opened vport */
1707 qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
Yuval Mintz6ddc7602016-05-11 16:36:23 +03001708
1709 __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001710 }
1711 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
1712 sizeof(struct pfvf_def_resp_tlv), status);
1713}
1714
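/* VPORT_TEARDOWN mailbox handler - stop the VF's vport and forget any
 * per-vport configuration the PF was tracking for it.
 */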
1715static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
1716 struct qed_ptt *p_ptt,
1717 struct qed_vf_info *vf)
1718{
1719 u8 status = PFVF_STATUS_SUCCESS;
1720 int rc;
1721
1722 vf->vport_instance--;
Yuval Mintz6ddc7602016-05-11 16:36:23 +03001723 vf->spoof_chk = false;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001724
1725 rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
Yuval Mintz1a635e42016-08-15 10:42:43 +03001726 if (rc) {
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001727 DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
1728 rc);
1729 status = PFVF_STATUS_FAILURE;
1730 }
1731
Yuval Mintz08feecd2016-05-11 16:36:20 +03001732 /* Forget the configuration on the vport */
1733 vf->configured_features = 0;
1734 memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
1735
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001736 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
1737 sizeof(struct pfvf_def_resp_tlv), status);
1738}
1739
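/* Build the START_RXQ response; legacy VFs only get the shorter
 * default-resp layout, while newer VFs are also told the offset of
 * their Rx-queue producer inside the PF's BAR0 MSDM zone.
 */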
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001740static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
1741 struct qed_ptt *p_ptt,
Yuval Mintza044df82016-08-22 13:25:09 +03001742 struct qed_vf_info *vf,
1743 u8 status, bool b_legacy)
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001744{
1745 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1746 struct pfvf_start_queue_resp_tlv *p_tlv;
1747 struct vfpf_start_rxq_tlv *req;
Yuval Mintza044df82016-08-22 13:25:09 +03001748 u16 length;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001749
1750 mbx->offset = (u8 *)mbx->reply_virt;
1751
Yuval Mintza044df82016-08-22 13:25:09 +03001752	/* Taking a bigger struct instead of adding a TLV to the list was a
1753 * mistake, but one which we're now stuck with, as some older
1754 * clients assume the size of the previous response.
1755 */
1756 if (!b_legacy)
1757 length = sizeof(*p_tlv);
1758 else
1759 length = sizeof(struct pfvf_def_resp_tlv);
1760
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001761 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
Yuval Mintza044df82016-08-22 13:25:09 +03001762 length);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001763 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1764 sizeof(struct channel_list_end_tlv));
1765
1766 /* Update the TLV with the response */
Yuval Mintza044df82016-08-22 13:25:09 +03001767 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001768 req = &mbx->req_virt->start_rxq;
Yuval Mintz351a4ded2016-06-02 10:23:29 +03001769 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
1770 offsetof(struct mstorm_vf_zone,
1771 non_trigger.eth_rx_queue_producers) +
1772 sizeof(struct eth_rx_prod_data) * req->rx_qid;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001773 }
1774
Yuval Mintza044df82016-08-22 13:25:09 +03001775 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001776}
1777
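/* START_RXQ mailbox handler - validate the requested queue/SB indices,
 * clear the Rx producer for non-legacy VFs and fire the Rx-queue start
 * ramrod using the PF's view of the VF's queue ids and CIDs.
 */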
1778static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
1779 struct qed_ptt *p_ptt,
1780 struct qed_vf_info *vf)
1781{
1782 struct qed_queue_start_common_params params;
1783 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
Yuval Mintz41086462016-06-05 13:11:13 +03001784 u8 status = PFVF_STATUS_NO_RESOURCE;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001785 struct vfpf_start_rxq_tlv *req;
Yuval Mintza044df82016-08-22 13:25:09 +03001786 bool b_legacy_vf = false;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001787 int rc;
1788
1789 memset(&params, 0, sizeof(params));
1790 req = &mbx->req_virt->start_rxq;
Yuval Mintz41086462016-06-05 13:11:13 +03001791
1792 if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
1793 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
1794 goto out;
1795
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001796 params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
Yuval Mintz351a4ded2016-06-02 10:23:29 +03001797 params.vf_qid = req->rx_qid;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001798 params.vport_id = vf->vport_id;
1799 params.sb = req->hw_sb;
1800 params.sb_idx = req->sb_index;
1801
Yuval Mintza044df82016-08-22 13:25:09 +03001802 /* Legacy VFs have their Producers in a different location, which they
1803 * calculate on their own and clean the producer prior to this.
1804 */
1805 if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1806 ETH_HSI_VER_NO_PKT_LEN_TUNN) {
1807 b_legacy_vf = true;
1808 } else {
1809 REG_WR(p_hwfn,
1810 GTT_BAR0_MAP_REG_MSDM_RAM +
1811 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
1812 0);
1813 }
1814
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001815 rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
1816 vf->vf_queues[req->rx_qid].fw_cid,
1817 &params,
1818 vf->abs_vf_id + 0x10,
1819 req->bd_max_bytes,
1820 req->rxq_addr,
Yuval Mintza044df82016-08-22 13:25:09 +03001821 req->cqe_pbl_addr, req->cqe_pbl_size,
1822 b_legacy_vf);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001823
1824 if (rc) {
1825 status = PFVF_STATUS_FAILURE;
1826 } else {
Yuval Mintz41086462016-06-05 13:11:13 +03001827 status = PFVF_STATUS_SUCCESS;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001828 vf->vf_queues[req->rx_qid].rxq_active = true;
1829 vf->num_active_rxqs++;
1830 }
1831
Yuval Mintz41086462016-06-05 13:11:13 +03001832out:
Yuval Mintza044df82016-08-22 13:25:09 +03001833 qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001834}
1835
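/* Build the START_TXQ response; newer VFs also receive the doorbell
 * offset of the started queue, legacy VFs only get the default-resp
 * layout.
 */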
Yuval Mintz5040acf2016-06-05 13:11:14 +03001836static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
1837 struct qed_ptt *p_ptt,
1838 struct qed_vf_info *p_vf, u8 status)
1839{
1840 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1841 struct pfvf_start_queue_resp_tlv *p_tlv;
Yuval Mintza044df82016-08-22 13:25:09 +03001842 bool b_legacy = false;
1843 u16 length;
Yuval Mintz5040acf2016-06-05 13:11:14 +03001844
1845 mbx->offset = (u8 *)mbx->reply_virt;
1846
Yuval Mintza044df82016-08-22 13:25:09 +03001847	/* Taking a bigger struct instead of adding a TLV to the list was a
1848 * mistake, but one which we're now stuck with, as some older
1849 * clients assume the size of the previous response.
1850 */
1851 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1852 ETH_HSI_VER_NO_PKT_LEN_TUNN)
1853 b_legacy = true;
1854
1855 if (!b_legacy)
1856 length = sizeof(*p_tlv);
1857 else
1858 length = sizeof(struct pfvf_def_resp_tlv);
1859
Yuval Mintz5040acf2016-06-05 13:11:14 +03001860 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
Yuval Mintza044df82016-08-22 13:25:09 +03001861 length);
Yuval Mintz5040acf2016-06-05 13:11:14 +03001862 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1863 sizeof(struct channel_list_end_tlv));
1864
1865 /* Update the TLV with the response */
Yuval Mintza044df82016-08-22 13:25:09 +03001866 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
Yuval Mintz5040acf2016-06-05 13:11:14 +03001867 u16 qid = mbx->req_virt->start_txq.tx_qid;
1868
Ram Amrani51ff1722016-10-01 21:59:57 +03001869 p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid,
1870 DQ_DEMS_LEGACY);
Yuval Mintz5040acf2016-06-05 13:11:14 +03001871 }
1872
Yuval Mintza044df82016-08-22 13:25:09 +03001873 qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
Yuval Mintz5040acf2016-06-05 13:11:14 +03001874}
1875
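/* START_TXQ mailbox handler - choose the VF's PQ, validate the requested
 * queue/SB indices and fire the Tx-queue start ramrod on behalf of the VF.
 */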
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001876static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
1877 struct qed_ptt *p_ptt,
1878 struct qed_vf_info *vf)
1879{
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001880 struct qed_queue_start_common_params params;
1881 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
Yuval Mintz41086462016-06-05 13:11:13 +03001882 u8 status = PFVF_STATUS_NO_RESOURCE;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001883 union qed_qm_pq_params pq_params;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001884 struct vfpf_start_txq_tlv *req;
1885 int rc;
1886
1887 /* Prepare the parameters which would choose the right PQ */
1888 memset(&pq_params, 0, sizeof(pq_params));
1889 pq_params.eth.is_vf = 1;
1890 pq_params.eth.vf_id = vf->relative_vf_id;
1891
1892 memset(&params, 0, sizeof(params));
1893 req = &mbx->req_virt->start_txq;
Yuval Mintz41086462016-06-05 13:11:13 +03001894
1895 if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
1896 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
1897 goto out;
1898
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001899 params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
1900 params.vport_id = vf->vport_id;
1901 params.sb = req->hw_sb;
1902 params.sb_idx = req->sb_index;
1903
1904 rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
1905 vf->opaque_fid,
1906 vf->vf_queues[req->tx_qid].fw_cid,
1907 &params,
1908 vf->abs_vf_id + 0x10,
1909 req->pbl_addr,
1910 req->pbl_size, &pq_params);
1911
Yuval Mintz41086462016-06-05 13:11:13 +03001912 if (rc) {
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001913 status = PFVF_STATUS_FAILURE;
Yuval Mintz41086462016-06-05 13:11:13 +03001914 } else {
1915 status = PFVF_STATUS_SUCCESS;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001916 vf->vf_queues[req->tx_qid].txq_active = true;
Yuval Mintz41086462016-06-05 13:11:13 +03001917 }
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001918
Yuval Mintz41086462016-06-05 13:11:13 +03001919out:
Yuval Mintz5040acf2016-06-05 13:11:14 +03001920 qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001921}
1922
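/* Close a contiguous range of VF Rx queues, sending a stop ramrod for
 * each queue that is currently active.
 */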
1923static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
1924 struct qed_vf_info *vf,
1925 u16 rxq_id, u8 num_rxqs, bool cqe_completion)
1926{
1927 int rc = 0;
1928 int qid;
1929
1930 if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
1931 return -EINVAL;
1932
1933 for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
1934 if (vf->vf_queues[qid].rxq_active) {
1935 rc = qed_sp_eth_rx_queue_stop(p_hwfn,
1936 vf->vf_queues[qid].
1937 fw_rx_qid, false,
1938 cqe_completion);
1939
1940 if (rc)
1941 return rc;
1942 }
1943 vf->vf_queues[qid].rxq_active = false;
1944 vf->num_active_rxqs--;
1945 }
1946
1947 return rc;
1948}
1949
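/* Tx counterpart of qed_iov_vf_stop_rxqs() - close a contiguous range of
 * VF Tx queues.
 */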
1950static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
1951 struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
1952{
1953 int rc = 0;
1954 int qid;
1955
1956 if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
1957 return -EINVAL;
1958
1959 for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
1960 if (vf->vf_queues[qid].txq_active) {
1961 rc = qed_sp_eth_tx_queue_stop(p_hwfn,
1962 vf->vf_queues[qid].
1963 fw_tx_qid);
1964
1965 if (rc)
1966 return rc;
1967 }
1968 vf->vf_queues[qid].txq_active = false;
1969 }
1970 return rc;
1971}
1972
1973static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
1974 struct qed_ptt *p_ptt,
1975 struct qed_vf_info *vf)
1976{
1977 u16 length = sizeof(struct pfvf_def_resp_tlv);
1978 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1979 u8 status = PFVF_STATUS_SUCCESS;
1980 struct vfpf_stop_rxqs_tlv *req;
1981 int rc;
1982
 1983	/* We give the option of starting from qid != 0, so we need to make
 1984	 * sure that qid + num_qs doesn't exceed the actual number of queues
 1985	 * that exist.
1986 */
1987 req = &mbx->req_virt->stop_rxqs;
1988 rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
1989 req->num_rxqs, req->cqe_completion);
1990 if (rc)
1991 status = PFVF_STATUS_FAILURE;
1992
1993 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
1994 length, status);
1995}
1996
1997static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
1998 struct qed_ptt *p_ptt,
1999 struct qed_vf_info *vf)
2000{
2001 u16 length = sizeof(struct pfvf_def_resp_tlv);
2002 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2003 u8 status = PFVF_STATUS_SUCCESS;
2004 struct vfpf_stop_txqs_tlv *req;
2005 int rc;
2006
 2007	/* We give the option of starting from qid != 0, so we need to make
 2008	 * sure that qid + num_qs doesn't exceed the actual number of queues
 2009	 * that exist.
2010 */
2011 req = &mbx->req_virt->stop_txqs;
2012 rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
2013 if (rc)
2014 status = PFVF_STATUS_FAILURE;
2015
2016 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2017 length, status);
2018}
2019
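/* UPDATE_RXQ mailbox handler - propagate the VF's CQE/event completion
 * flags to each referenced Rx queue via update ramrods.
 */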
Yuval Mintz17b235c2016-05-11 16:36:18 +03002020static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
2021 struct qed_ptt *p_ptt,
2022 struct qed_vf_info *vf)
2023{
2024 u16 length = sizeof(struct pfvf_def_resp_tlv);
2025 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2026 struct vfpf_update_rxq_tlv *req;
2027 u8 status = PFVF_STATUS_SUCCESS;
2028 u8 complete_event_flg;
2029 u8 complete_cqe_flg;
2030 u16 qid;
2031 int rc;
2032 u8 i;
2033
2034 req = &mbx->req_virt->update_rxq;
2035 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2036 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2037
2038 for (i = 0; i < req->num_rxqs; i++) {
2039 qid = req->rx_qid + i;
2040
2041 if (!vf->vf_queues[qid].rxq_active) {
 2042			DP_NOTICE(p_hwfn, "VF rx_qid = %d isn't active!\n",
2043 qid);
2044 status = PFVF_STATUS_FAILURE;
2045 break;
2046 }
2047
2048 rc = qed_sp_eth_rx_queues_update(p_hwfn,
2049 vf->vf_queues[qid].fw_rx_qid,
2050 1,
2051 complete_cqe_flg,
2052 complete_event_flg,
2053 QED_SPQ_MODE_EBLOCK, NULL);
2054
2055 if (rc) {
2056 status = PFVF_STATUS_FAILURE;
2057 break;
2058 }
2059 }
2060
2061 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2062 length, status);
2063}
2064
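/* Walk the VF's TLV list looking for an extended TLV of the requested
 * type; returns NULL on a malformed list or if the type isn't present.
 */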
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002065void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
2066 void *p_tlvs_list, u16 req_type)
2067{
2068 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2069 int len = 0;
2070
2071 do {
2072 if (!p_tlv->length) {
2073 DP_NOTICE(p_hwfn, "Zero length TLV found\n");
2074 return NULL;
2075 }
2076
2077 if (p_tlv->type == req_type) {
2078 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2079 "Extended tlv type %d, length %d found\n",
2080 p_tlv->type, p_tlv->length);
2081 return p_tlv;
2082 }
2083
2084 len += p_tlv->length;
2085 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2086
2087 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
 2088			DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
2089 return NULL;
2090 }
2091 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
2092
2093 return NULL;
2094}
2095
2096static void
2097qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
2098 struct qed_sp_vport_update_params *p_data,
2099 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2100{
2101 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2102 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2103
2104 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2105 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2106 if (!p_act_tlv)
2107 return;
2108
2109 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2110 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2111 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2112 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2113 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
2114}
2115
2116static void
Yuval Mintz17b235c2016-05-11 16:36:18 +03002117qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
2118 struct qed_sp_vport_update_params *p_data,
2119 struct qed_vf_info *p_vf,
2120 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2121{
2122 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2123 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2124
2125 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2126 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2127 if (!p_vlan_tlv)
2128 return;
2129
Yuval Mintz08feecd2016-05-11 16:36:20 +03002130 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2131
2132 /* Ignore the VF request if we're forcing a vlan */
Yuval Mintz1a635e42016-08-15 10:42:43 +03002133 if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
Yuval Mintz08feecd2016-05-11 16:36:20 +03002134 p_data->update_inner_vlan_removal_flg = 1;
2135 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2136 }
Yuval Mintz17b235c2016-05-11 16:36:18 +03002137
2138 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
2139}
2140
2141static void
2142qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
2143 struct qed_sp_vport_update_params *p_data,
2144 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2145{
2146 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2147 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2148
2149 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2150 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2151 tlv);
2152 if (!p_tx_switch_tlv)
2153 return;
2154
2155 p_data->update_tx_switching_flg = 1;
2156 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2157 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
2158}
2159
2160static void
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002161qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2162 struct qed_sp_vport_update_params *p_data,
2163 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2164{
2165 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2166 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2167
2168 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2169 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2170 if (!p_mcast_tlv)
2171 return;
2172
2173 p_data->update_approx_mcast_flg = 1;
2174 memcpy(p_data->bins, p_mcast_tlv->bins,
2175 sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2176 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2177}
2178
2179static void
2180qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
2181 struct qed_sp_vport_update_params *p_data,
2182 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2183{
2184 struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
2185 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2186 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2187
2188 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2189 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2190 if (!p_accept_tlv)
2191 return;
2192
2193 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2194 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2195 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2196 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2197 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
2198}
2199
2200static void
Yuval Mintz17b235c2016-05-11 16:36:18 +03002201qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
2202 struct qed_sp_vport_update_params *p_data,
2203 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2204{
2205 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2206 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2207
2208 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2209 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2210 tlv);
2211 if (!p_accept_any_vlan)
2212 return;
2213
2214 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2215 p_data->update_accept_any_vlan_flg =
2216 p_accept_any_vlan->update_accept_any_vlan_flg;
2217 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2218}
2219
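/* Translate the VF's RSS TLV into PF RSS parameters, remapping the
 * VF-relative indirection table entries onto the FW Rx queue ids owned
 * by this VF.
 */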
2220static void
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002221qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
2222 struct qed_vf_info *vf,
2223 struct qed_sp_vport_update_params *p_data,
2224 struct qed_rss_params *p_rss,
2225 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2226{
2227 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2228 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2229 u16 i, q_idx, max_q_idx;
2230 u16 table_size;
2231
2232 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2233 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2234 if (!p_rss_tlv) {
2235 p_data->rss_params = NULL;
2236 return;
2237 }
2238
2239 memset(p_rss, 0, sizeof(struct qed_rss_params));
2240
2241 p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
2242 VFPF_UPDATE_RSS_CONFIG_FLAG);
2243 p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
2244 VFPF_UPDATE_RSS_CAPS_FLAG);
2245 p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
2246 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2247 p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
2248 VFPF_UPDATE_RSS_KEY_FLAG);
2249
2250 p_rss->rss_enable = p_rss_tlv->rss_enable;
2251 p_rss->rss_eng_id = vf->relative_vf_id + 1;
2252 p_rss->rss_caps = p_rss_tlv->rss_caps;
2253 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2254 memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
2255 sizeof(p_rss->rss_ind_table));
2256 memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
2257
2258 table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
2259 (1 << p_rss_tlv->rss_table_size_log));
2260
2261 max_q_idx = ARRAY_SIZE(vf->vf_queues);
2262
2263 for (i = 0; i < table_size; i++) {
2264 u16 index = vf->vf_queues[0].fw_rx_qid;
2265
2266 q_idx = p_rss->rss_ind_table[i];
2267 if (q_idx >= max_q_idx)
2268 DP_NOTICE(p_hwfn,
2269 "rss_ind_table[%d] = %d, rxq is out of range\n",
2270 i, q_idx);
2271 else if (!vf->vf_queues[q_idx].rxq_active)
2272 DP_NOTICE(p_hwfn,
2273 "rss_ind_table[%d] = %d, rxq is not active\n",
2274 i, q_idx);
2275 else
2276 index = vf->vf_queues[q_idx].fw_rx_qid;
2277 p_rss->rss_ind_table[i] = index;
2278 }
2279
2280 p_data->rss_params = p_rss;
2281 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
2282}
2283
Yuval Mintz17b235c2016-05-11 16:36:18 +03002284static void
2285qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
2286 struct qed_vf_info *vf,
2287 struct qed_sp_vport_update_params *p_data,
2288 struct qed_sge_tpa_params *p_sge_tpa,
2289 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2290{
2291 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2292 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2293
2294 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2295 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2296
2297 if (!p_sge_tpa_tlv) {
2298 p_data->sge_tpa_params = NULL;
2299 return;
2300 }
2301
2302 memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
2303
2304 p_sge_tpa->update_tpa_en_flg =
2305 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2306 p_sge_tpa->update_tpa_param_flg =
2307 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2308 VFPF_UPDATE_TPA_PARAM_FLAG);
2309
2310 p_sge_tpa->tpa_ipv4_en_flg =
2311 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2312 p_sge_tpa->tpa_ipv6_en_flg =
2313 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2314 p_sge_tpa->tpa_pkt_split_flg =
2315 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2316 p_sge_tpa->tpa_hdr_data_split_flg =
2317 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2318 p_sge_tpa->tpa_gro_consistent_flg =
2319 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2320
2321 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2322 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2323 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2324 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2325 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2326
2327 p_data->sge_tpa_params = p_sge_tpa;
2328
2329 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
2330}
2331
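/* VPORT_UPDATE mailbox handler - collect all extended TLVs the VF sent,
 * build a single vport-update ramrod from them and report back which
 * TLV types were recognized.
 */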
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002332static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
2333 struct qed_ptt *p_ptt,
2334 struct qed_vf_info *vf)
2335{
2336 struct qed_sp_vport_update_params params;
2337 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
Yuval Mintz17b235c2016-05-11 16:36:18 +03002338 struct qed_sge_tpa_params sge_tpa_params;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002339 struct qed_rss_params rss_params;
2340 u8 status = PFVF_STATUS_SUCCESS;
2341 u16 tlvs_mask = 0;
2342 u16 length;
2343 int rc;
2344
Yuval Mintz41086462016-06-05 13:11:13 +03002345	/* Validate that the VF has an active vport to update */
2346 if (!vf->vport_instance) {
2347 DP_VERBOSE(p_hwfn,
2348 QED_MSG_IOV,
2349 "No VPORT instance available for VF[%d], failing vport update\n",
2350 vf->abs_vf_id);
2351 status = PFVF_STATUS_FAILURE;
2352 goto out;
2353 }
2354
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002355 memset(&params, 0, sizeof(params));
2356 params.opaque_fid = vf->opaque_fid;
2357 params.vport_id = vf->vport_id;
2358 params.rss_params = NULL;
2359
2360 /* Search for extended tlvs list and update values
2361 * from VF in struct qed_sp_vport_update_params.
2362 */
2363 qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
Yuval Mintz17b235c2016-05-11 16:36:18 +03002364 qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
2365 qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002366 qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
2367 qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
2368 qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
2369 mbx, &tlvs_mask);
Yuval Mintz17b235c2016-05-11 16:36:18 +03002370 qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
2371 qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
2372 &sge_tpa_params, mbx, &tlvs_mask);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002373
 2374	/* Just log a message if there isn't a single extended TLV in the
 2375	 * buffer. Once all vport update ramrod features are requested by
 2376	 * the VF as extended TLVs, an error can be returned in the response
 2377	 * if no extended TLV is present in the buffer.
2378 */
2379 if (!tlvs_mask) {
2380 DP_NOTICE(p_hwfn,
2381 "No feature tlvs found for vport update\n");
2382 status = PFVF_STATUS_NOT_SUPPORTED;
2383 goto out;
2384 }
2385
2386 rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
2387
2388 if (rc)
2389 status = PFVF_STATUS_FAILURE;
2390
2391out:
2392 length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
2393 tlvs_mask, tlvs_mask);
2394 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2395}
2396
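/* Track the VF's vlan filter requests in the PF's shadow config so they
 * can be re-applied once a forced PVID is later removed.
 */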
Yuval Mintz8246d0b2016-06-05 13:11:15 +03002397static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
2398 struct qed_vf_info *p_vf,
2399 struct qed_filter_ucast *p_params)
Yuval Mintz08feecd2016-05-11 16:36:20 +03002400{
2401 int i;
2402
Yuval Mintz08feecd2016-05-11 16:36:20 +03002403 /* First remove entries and then add new ones */
2404 if (p_params->opcode == QED_FILTER_REMOVE) {
2405 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2406 if (p_vf->shadow_config.vlans[i].used &&
2407 p_vf->shadow_config.vlans[i].vid ==
2408 p_params->vlan) {
2409 p_vf->shadow_config.vlans[i].used = false;
2410 break;
2411 }
2412 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
2413 DP_VERBOSE(p_hwfn,
2414 QED_MSG_IOV,
 2415				   "VF [%d] - Tries to remove a non-existent vlan\n",
2416 p_vf->relative_vf_id);
2417 return -EINVAL;
2418 }
2419 } else if (p_params->opcode == QED_FILTER_REPLACE ||
2420 p_params->opcode == QED_FILTER_FLUSH) {
2421 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2422 p_vf->shadow_config.vlans[i].used = false;
2423 }
2424
2425 /* In forced mode, we're willing to remove entries - but we don't add
2426 * new ones.
2427 */
Yuval Mintz1a635e42016-08-15 10:42:43 +03002428 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
Yuval Mintz08feecd2016-05-11 16:36:20 +03002429 return 0;
2430
2431 if (p_params->opcode == QED_FILTER_ADD ||
2432 p_params->opcode == QED_FILTER_REPLACE) {
2433 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
2434 if (p_vf->shadow_config.vlans[i].used)
2435 continue;
2436
2437 p_vf->shadow_config.vlans[i].used = true;
2438 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
2439 break;
2440 }
2441
2442 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
2443 DP_VERBOSE(p_hwfn,
2444 QED_MSG_IOV,
2445 "VF [%d] - Tries to configure more than %d vlan filters\n",
2446 p_vf->relative_vf_id,
2447 QED_ETH_VF_NUM_VLAN_FILTERS + 1);
2448 return -EINVAL;
2449 }
2450 }
2451
2452 return 0;
2453}
2454
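/* Track the VF's unicast MAC filters in the PF's shadow config; when a
 * forced MAC is in place the shadow is intentionally left untouched.
 */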
Yuval Mintz8246d0b2016-06-05 13:11:15 +03002455static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
2456 struct qed_vf_info *p_vf,
2457 struct qed_filter_ucast *p_params)
2458{
2459 int i;
2460
2461 /* If we're in forced-mode, we don't allow any change */
Yuval Mintz1a635e42016-08-15 10:42:43 +03002462 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
Yuval Mintz8246d0b2016-06-05 13:11:15 +03002463 return 0;
2464
2465 /* First remove entries and then add new ones */
2466 if (p_params->opcode == QED_FILTER_REMOVE) {
2467 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
2468 if (ether_addr_equal(p_vf->shadow_config.macs[i],
2469 p_params->mac)) {
2470 memset(p_vf->shadow_config.macs[i], 0,
2471 ETH_ALEN);
2472 break;
2473 }
2474 }
2475
2476 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
2477 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2478 "MAC isn't configured\n");
2479 return -EINVAL;
2480 }
2481 } else if (p_params->opcode == QED_FILTER_REPLACE ||
2482 p_params->opcode == QED_FILTER_FLUSH) {
2483 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
2484 memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN);
2485 }
2486
2487 /* List the new MAC address */
2488 if (p_params->opcode != QED_FILTER_ADD &&
2489 p_params->opcode != QED_FILTER_REPLACE)
2490 return 0;
2491
2492 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
2493 if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
2494 ether_addr_copy(p_vf->shadow_config.macs[i],
2495 p_params->mac);
2496 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2497 "Added MAC at %d entry in shadow\n", i);
2498 break;
2499 }
2500 }
2501
2502 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
2503 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
2504 return -EINVAL;
2505 }
2506
2507 return 0;
2508}
2509
2510static int
2511qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
2512 struct qed_vf_info *p_vf,
2513 struct qed_filter_ucast *p_params)
2514{
2515 int rc = 0;
2516
2517 if (p_params->type == QED_FILTER_MAC) {
2518 rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
2519 if (rc)
2520 return rc;
2521 }
2522
2523 if (p_params->type == QED_FILTER_VLAN)
2524 rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
2525
2526 return rc;
2527}
2528
Baoyou Xieba569472016-09-09 09:21:15 +08002529static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
2530 int vfid, struct qed_filter_ucast *params)
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002531{
2532 struct qed_public_vf_info *vf;
2533
2534 vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
2535 if (!vf)
2536 return -EINVAL;
2537
2538 /* No real decision to make; Store the configured MAC */
2539 if (params->type == QED_FILTER_MAC ||
2540 params->type == QED_FILTER_MAC_VLAN)
2541 ether_addr_copy(vf->mac, params->mac);
2542
2543 return 0;
2544}
2545
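/* UCAST_FILTER mailbox handler - mirror the request into the VF's shadow
 * config, enforce any forced MAC/VLAN policy from the bulletin board and
 * apply the filter via ramrod.
 */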
2546static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
2547 struct qed_ptt *p_ptt,
2548 struct qed_vf_info *vf)
2549{
Yuval Mintz08feecd2016-05-11 16:36:20 +03002550 struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002551 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2552 struct vfpf_ucast_filter_tlv *req;
2553 u8 status = PFVF_STATUS_SUCCESS;
2554 struct qed_filter_ucast params;
2555 int rc;
2556
2557 /* Prepare the unicast filter params */
2558 memset(&params, 0, sizeof(struct qed_filter_ucast));
2559 req = &mbx->req_virt->ucast_filter;
2560 params.opcode = (enum qed_filter_opcode)req->opcode;
2561 params.type = (enum qed_filter_ucast_type)req->type;
2562
2563 params.is_rx_filter = 1;
2564 params.is_tx_filter = 1;
2565 params.vport_to_remove_from = vf->vport_id;
2566 params.vport_to_add_to = vf->vport_id;
2567 memcpy(params.mac, req->mac, ETH_ALEN);
2568 params.vlan = req->vlan;
2569
2570 DP_VERBOSE(p_hwfn,
2571 QED_MSG_IOV,
2572 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
2573 vf->abs_vf_id, params.opcode, params.type,
2574 params.is_rx_filter ? "RX" : "",
2575 params.is_tx_filter ? "TX" : "",
2576 params.vport_to_add_to,
2577 params.mac[0], params.mac[1],
2578 params.mac[2], params.mac[3],
2579 params.mac[4], params.mac[5], params.vlan);
2580
2581 if (!vf->vport_instance) {
2582 DP_VERBOSE(p_hwfn,
2583 QED_MSG_IOV,
2584 "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
2585 vf->abs_vf_id);
2586 status = PFVF_STATUS_FAILURE;
2587 goto out;
2588 }
2589
Yuval Mintz08feecd2016-05-11 16:36:20 +03002590 /* Update shadow copy of the VF configuration */
2591 if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
2592 status = PFVF_STATUS_FAILURE;
2593 goto out;
2594 }
2595
 2596	/* Determine if the unicast filtering is acceptable to the PF */
Yuval Mintz1a635e42016-08-15 10:42:43 +03002597 if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
Yuval Mintz08feecd2016-05-11 16:36:20 +03002598 (params.type == QED_FILTER_VLAN ||
2599 params.type == QED_FILTER_MAC_VLAN)) {
 2600		/* Once a VLAN is forced or a PVID is set, do not allow
 2601		 * the VF to add/replace any further VLANs.
2602 */
2603 if (params.opcode == QED_FILTER_ADD ||
2604 params.opcode == QED_FILTER_REPLACE)
2605 status = PFVF_STATUS_FORCED;
2606 goto out;
2607 }
2608
Yuval Mintz1a635e42016-08-15 10:42:43 +03002609 if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
Yuval Mintzeff16962016-05-11 16:36:21 +03002610 (params.type == QED_FILTER_MAC ||
2611 params.type == QED_FILTER_MAC_VLAN)) {
2612 if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
2613 (params.opcode != QED_FILTER_ADD &&
2614 params.opcode != QED_FILTER_REPLACE))
2615 status = PFVF_STATUS_FORCED;
2616 goto out;
2617 }
2618
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002619 rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
2620 if (rc) {
2621 status = PFVF_STATUS_FAILURE;
2622 goto out;
2623 }
2624
2625 rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
2626 QED_SPQ_MODE_CB, NULL);
2627 if (rc)
2628 status = PFVF_STATUS_FAILURE;
2629
2630out:
2631 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
2632 sizeof(struct pfvf_def_resp_tlv), status);
2633}
2634
Yuval Mintz0b55e272016-05-11 16:36:15 +03002635static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
2636 struct qed_ptt *p_ptt,
2637 struct qed_vf_info *vf)
2638{
2639 int i;
2640
2641 /* Reset the SBs */
2642 for (i = 0; i < vf->num_sbs; i++)
2643 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
2644 vf->igu_sbs[i],
2645 vf->opaque_fid, false);
2646
2647 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
2648 sizeof(struct pfvf_def_resp_tlv),
2649 PFVF_STATUS_SUCCESS);
2650}
2651
2652static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
2653 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
2654{
2655 u16 length = sizeof(struct pfvf_def_resp_tlv);
2656 u8 status = PFVF_STATUS_SUCCESS;
2657
2658 /* Disable Interrupts for VF */
2659 qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
2660
2661 /* Reset Permission table */
2662 qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
2663
2664 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
2665 length, status);
2666}
2667
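/* RELEASE mailbox handler - clean the VF's state and, unless it is
 * already stopped or free, send the VF-stop ramrod before replying.
 */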
2668static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
2669 struct qed_ptt *p_ptt,
2670 struct qed_vf_info *p_vf)
2671{
2672 u16 length = sizeof(struct pfvf_def_resp_tlv);
Yuval Mintz1fe614d2016-06-05 13:11:11 +03002673 u8 status = PFVF_STATUS_SUCCESS;
2674 int rc = 0;
Yuval Mintz0b55e272016-05-11 16:36:15 +03002675
2676 qed_iov_vf_cleanup(p_hwfn, p_vf);
2677
Yuval Mintz1fe614d2016-06-05 13:11:11 +03002678 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
2679 /* Stopping the VF */
2680 rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
2681 p_vf->opaque_fid);
2682
2683 if (rc) {
2684 DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
2685 rc);
2686 status = PFVF_STATUS_FAILURE;
2687 }
2688
2689 p_vf->state = VF_STOPPED;
2690 }
2691
Yuval Mintz0b55e272016-05-11 16:36:15 +03002692 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
Yuval Mintz1fe614d2016-06-05 13:11:11 +03002693 length, status);
Yuval Mintz0b55e272016-05-11 16:36:15 +03002694}
2695
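/* While pretending to be the VF, poll DORQ until its doorbell usage
 * counter drains to zero (up to roughly a second) as part of FLR cleanup.
 */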
2696static int
2697qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
2698 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
2699{
2700 int cnt;
2701 u32 val;
2702
2703 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
2704
2705 for (cnt = 0; cnt < 50; cnt++) {
2706 val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
2707 if (!val)
2708 break;
2709 msleep(20);
2710 }
2711 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
2712
2713 if (cnt == 50) {
2714 DP_ERR(p_hwfn,
2715 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
2716 p_vf->abs_vf_id, val);
2717 return -EBUSY;
2718 }
2719
2720 return 0;
2721}
2722
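/* Snapshot the PBF producers/consumers per VOQ and wait until the
 * consumers advance past the snapshot, i.e. all in-flight traffic has
 * drained.
 */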
2723static int
2724qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
2725 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
2726{
2727 u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
2728 int i, cnt;
2729
2730 /* Read initial consumers & producers */
2731 for (i = 0; i < MAX_NUM_VOQS; i++) {
2732 u32 prod;
2733
2734 cons[i] = qed_rd(p_hwfn, p_ptt,
2735 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
2736 i * 0x40);
2737 prod = qed_rd(p_hwfn, p_ptt,
2738 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
2739 i * 0x40);
2740 distance[i] = prod - cons[i];
2741 }
2742
2743 /* Wait for consumers to pass the producers */
2744 i = 0;
2745 for (cnt = 0; cnt < 50; cnt++) {
2746 for (; i < MAX_NUM_VOQS; i++) {
2747 u32 tmp;
2748
2749 tmp = qed_rd(p_hwfn, p_ptt,
2750 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
2751 i * 0x40);
2752 if (distance[i] > tmp - cons[i])
2753 break;
2754 }
2755
2756 if (i == MAX_NUM_VOQS)
2757 break;
2758
2759 msleep(20);
2760 }
2761
2762 if (cnt == 50) {
2763 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
2764 p_vf->abs_vf_id, i);
2765 return -EBUSY;
2766 }
2767
2768 return 0;
2769}
2770
2771static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
2772 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
2773{
2774 int rc;
2775
2776 rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
2777 if (rc)
2778 return rc;
2779
2780 rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
2781 if (rc)
2782 return rc;
2783
2784 return 0;
2785}
2786
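/* Per-VF FLR handling - clean the VF's state, drain DORQ/PBF, run the
 * final cleanup ramrod, re-open the VF-PF channel, re-enable VF access
 * and mark the VF for MFW ack.
 */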
2787static int
2788qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
2789 struct qed_ptt *p_ptt,
2790 u16 rel_vf_id, u32 *ack_vfs)
2791{
2792 struct qed_vf_info *p_vf;
2793 int rc = 0;
2794
2795 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
2796 if (!p_vf)
2797 return 0;
2798
2799 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
2800 (1ULL << (rel_vf_id % 64))) {
2801 u16 vfid = p_vf->abs_vf_id;
2802
2803 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2804 "VF[%d] - Handling FLR\n", vfid);
2805
2806 qed_iov_vf_cleanup(p_hwfn, p_vf);
2807
2808 /* If VF isn't active, no need for anything but SW */
2809 if (!p_vf->b_init)
2810 goto cleanup;
2811
2812 rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
2813 if (rc)
2814 goto cleanup;
2815
2816 rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
2817 if (rc) {
 2818			DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
2819 return rc;
2820 }
2821
Yuval Mintz7eff82b2016-10-14 05:19:22 -04002822 /* Workaround to make VF-PF channel ready, as FW
2823 * doesn't do that as a part of FLR.
2824 */
2825 REG_WR(p_hwfn,
2826 GTT_BAR0_MAP_REG_USDM_RAM +
2827 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
2828
Yuval Mintz0b55e272016-05-11 16:36:15 +03002829 /* VF_STOPPED has to be set only after final cleanup
2830 * but prior to re-enabling the VF.
2831 */
2832 p_vf->state = VF_STOPPED;
2833
2834 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
2835 if (rc) {
 2836			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
2837 vfid);
2838 return rc;
2839 }
2840cleanup:
2841 /* Mark VF for ack and clean pending state */
2842 if (p_vf->state == VF_RESET)
2843 p_vf->state = VF_STOPPED;
Yuval Mintz1a635e42016-08-15 10:42:43 +03002844 ack_vfs[vfid / 32] |= BIT((vfid % 32));
Yuval Mintz0b55e272016-05-11 16:36:15 +03002845 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
2846 ~(1ULL << (rel_vf_id % 64));
2847 p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
2848 ~(1ULL << (rel_vf_id % 64));
2849 }
2850
2851 return rc;
2852}
2853
Baoyou Xieba569472016-09-09 09:21:15 +08002854static int
2855qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
Yuval Mintz0b55e272016-05-11 16:36:15 +03002856{
2857 u32 ack_vfs[VF_MAX_STATIC / 32];
2858 int rc = 0;
2859 u16 i;
2860
2861 memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
2862
2863 /* Since BRB <-> PRS interface can't be tested as part of the flr
2864 * polling due to HW limitations, simply sleep a bit. And since
2865 * there's no need to wait per-vf, do it before looping.
2866 */
2867 msleep(100);
2868
2869 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
2870 qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
2871
2872 rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
2873 return rc;
2874}
2875
2876int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
2877{
2878 u16 i, found = 0;
2879
2880 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
2881 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
2882 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2883 "[%08x,...,%08x]: %08x\n",
2884 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
2885
2886 if (!p_hwfn->cdev->p_iov_info) {
2887 DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
2888 return 0;
2889 }
2890
2891 /* Mark VFs */
2892 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
2893 struct qed_vf_info *p_vf;
2894 u8 vfid;
2895
2896 p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
2897 if (!p_vf)
2898 continue;
2899
2900 vfid = p_vf->abs_vf_id;
Yuval Mintz1a635e42016-08-15 10:42:43 +03002901 if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
Yuval Mintz0b55e272016-05-11 16:36:15 +03002902 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
2903 u16 rel_vf_id = p_vf->relative_vf_id;
2904
2905 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2906 "VF[%d] [rel %d] got FLR-ed\n",
2907 vfid, rel_vf_id);
2908
2909 p_vf->state = VF_RESET;
2910
 2911			/* No need to lock here, since pending_flr should
 2912			 * only change here and before ACKing the MFW. Since
 2913			 * the MFW will not trigger an additional attention for
 2914			 * VF flr until we ACK, we're safe.
2915 */
2916 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
2917 found = 1;
2918 }
2919 }
2920
2921 return found;
2922}
2923
Yuval Mintz73390ac2016-05-11 16:36:24 +03002924static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
2925 u16 vfid,
2926 struct qed_mcp_link_params *p_params,
2927 struct qed_mcp_link_state *p_link,
2928 struct qed_mcp_link_capabilities *p_caps)
2929{
2930 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
2931 vfid,
2932 false);
2933 struct qed_bulletin_content *p_bulletin;
2934
2935 if (!p_vf)
2936 return;
2937
2938 p_bulletin = p_vf->bulletin.p_virt;
2939
2940 if (p_params)
2941 __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
2942 if (p_link)
2943 __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
2944 if (p_caps)
2945 __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
2946}
2947
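/* Main dispatch for a VF->PF mailbox request - route supported TLVs to
 * their handlers, reject requests from VFs marked malicious, and attempt
 * a NOT_SUPPORTED reply for unknown TLVs when a valid reply address is
 * known so the VF doesn't time out.
 */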
Yuval Mintz37bff2b2016-05-11 16:36:13 +03002948static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
2949 struct qed_ptt *p_ptt, int vfid)
2950{
2951 struct qed_iov_vf_mbx *mbx;
2952 struct qed_vf_info *p_vf;
Yuval Mintz37bff2b2016-05-11 16:36:13 +03002953
2954 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
2955 if (!p_vf)
2956 return;
2957
2958 mbx = &p_vf->vf_mbx;
2959
2960 /* qed_iov_process_mbx_request */
Yuval Mintz54fdd802016-06-05 13:11:16 +03002961 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2962 "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03002963
2964 mbx->first_tlv = mbx->req_virt->first_tlv;
2965
2966 /* check if tlv type is known */
Yuval Mintz7eff82b2016-10-14 05:19:22 -04002967 if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
2968 !p_vf->b_malicious) {
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03002969 switch (mbx->first_tlv.tl.type) {
2970 case CHANNEL_TLV_ACQUIRE:
2971 qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
2972 break;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002973 case CHANNEL_TLV_VPORT_START:
2974 qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
2975 break;
2976 case CHANNEL_TLV_VPORT_TEARDOWN:
2977 qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
2978 break;
2979 case CHANNEL_TLV_START_RXQ:
2980 qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
2981 break;
2982 case CHANNEL_TLV_START_TXQ:
2983 qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
2984 break;
2985 case CHANNEL_TLV_STOP_RXQS:
2986 qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
2987 break;
2988 case CHANNEL_TLV_STOP_TXQS:
2989 qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
2990 break;
Yuval Mintz17b235c2016-05-11 16:36:18 +03002991 case CHANNEL_TLV_UPDATE_RXQ:
2992 qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
2993 break;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002994 case CHANNEL_TLV_VPORT_UPDATE:
2995 qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
2996 break;
2997 case CHANNEL_TLV_UCAST_FILTER:
2998 qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
2999 break;
Yuval Mintz0b55e272016-05-11 16:36:15 +03003000 case CHANNEL_TLV_CLOSE:
3001 qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3002 break;
3003 case CHANNEL_TLV_INT_CLEANUP:
3004 qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3005 break;
3006 case CHANNEL_TLV_RELEASE:
3007 qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3008 break;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03003009 }
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003010 } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3011 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3012 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3013 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3014
3015 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3016 mbx->first_tlv.tl.type,
3017 sizeof(struct pfvf_def_resp_tlv),
3018 PFVF_STATUS_MALICIOUS);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003019 } else {
3020 /* unknown TLV - this may belong to a VF driver from the future
3021 * - a version written after this PF driver was written, which
3022 * supports features unknown as of yet. Too bad since we don't
3023 * support them. Or this may be because someone wrote a crappy
3024 * VF driver and is sending garbage over the channel.
3025 */
Yuval Mintz54fdd802016-06-05 13:11:16 +03003026 DP_NOTICE(p_hwfn,
3027 "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
3028 p_vf->abs_vf_id,
3029 mbx->first_tlv.tl.type,
3030 mbx->first_tlv.tl.length,
3031 mbx->first_tlv.padding, mbx->first_tlv.reply_address);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003032
Yuval Mintz54fdd802016-06-05 13:11:16 +03003033 /* Try replying in case reply address matches the acquisition's
3034 * posted address.
3035 */
3036 if (p_vf->acquire.first_tlv.reply_address &&
3037 (mbx->first_tlv.reply_address ==
3038 p_vf->acquire.first_tlv.reply_address)) {
3039 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3040 mbx->first_tlv.tl.type,
3041 sizeof(struct pfvf_def_resp_tlv),
3042 PFVF_STATUS_NOT_SUPPORTED);
3043 } else {
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003044 DP_VERBOSE(p_hwfn,
3045 QED_MSG_IOV,
Yuval Mintz54fdd802016-06-05 13:11:16 +03003046 "VF[%02x]: Can't respond to TLV - no valid reply address\n",
3047 p_vf->abs_vf_id);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003048 }
3049 }
3050}
3051
Baoyou Xieba569472016-09-09 09:21:15 +08003052static void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003053{
3054 u64 add_bit = 1ULL << (vfid % 64);
3055
3056 p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
3057}
3058
3059static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
3060 u64 *events)
3061{
3062 u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
3063
3064 memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
3065 memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
3066}
3067
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003068static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
3069 u16 abs_vfid)
3070{
3071 u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
3072
3073 if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
3074 DP_VERBOSE(p_hwfn,
3075 QED_MSG_IOV,
3076 "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
3077 abs_vfid);
3078 return NULL;
3079 }
3080
3081 return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
3082}
3083
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003084static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
3085 u16 abs_vfid, struct regpair *vf_msg)
3086{
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003087 struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003088 abs_vfid);
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003089
3090 if (!p_vf)
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003091 return 0;
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003092
 3093	/* Record the physical address of the request so that the handler
 3094	 * can later copy the message from it.
3095 */
3096 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
3097
3098 /* Mark the event and schedule the workqueue */
3099 qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
3100 qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
3101
3102 return 0;
3103}
3104
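/* EQ handler for a MALICIOUS_VF event - log the FW error id and flag the
 * VF so further mailbox requests from it are rejected.
 */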
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003105static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
3106 struct malicious_vf_eqe_data *p_data)
3107{
3108 struct qed_vf_info *p_vf;
3109
3110 p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
3111
3112 if (!p_vf)
3113 return;
3114
3115 DP_INFO(p_hwfn,
3116 "VF [%d] - Malicious behavior [%02x]\n",
3117 p_vf->abs_vf_id, p_data->err_id);
3118
3119 p_vf->b_malicious = true;
3120}
3121
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003122int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
3123 u8 opcode, __le16 echo, union event_ring_data *data)
3124{
3125 switch (opcode) {
3126 case COMMON_EVENT_VF_PF_CHANNEL:
3127 return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
3128 &data->vf_pf_channel.msg_addr);
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003129 case COMMON_EVENT_MALICIOUS_VF:
3130 qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
3131 return 0;
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003132 default:
3133 DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
3134 opcode);
3135 return -EINVAL;
3136 }
3137}
3138
Yuval Mintz32a47e72016-05-11 16:36:12 +03003139u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
3140{
3141 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
3142 u16 i;
3143
3144 if (!p_iov)
3145 goto out;
3146
3147 for (i = rel_vf_id; i < p_iov->total_vfs; i++)
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003148		if (qed_iov_is_valid_vfid(p_hwfn, i, true, false))
Yuval Mintz32a47e72016-05-11 16:36:12 +03003149 return i;
3150
3151out:
3152 return MAX_NUM_VFS;
3153}
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003154
3155static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
3156 int vfid)
3157{
3158 struct qed_dmae_params params;
3159 struct qed_vf_info *vf_info;
3160
3161 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3162 if (!vf_info)
3163 return -EINVAL;
3164
3165 memset(&params, 0, sizeof(struct qed_dmae_params));
3166 params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
3167 params.src_vfid = vf_info->abs_vf_id;
3168
3169 if (qed_dmae_host2host(p_hwfn, ptt,
3170 vf_info->vf_mbx.pending_req,
3171 vf_info->vf_mbx.req_phys,
3172 sizeof(union vfpf_tlvs) / 4, &params)) {
3173 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3174 "Failed to copy message from VF 0x%02x\n", vfid);
3175
3176 return -EIO;
3177 }
3178
3179 return 0;
3180}
3181
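/* Publish a forced MAC in the VF's bulletin board and push it to the
 * vport immediately; this also invalidates any VF-requested MAC.
 */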
Yuval Mintzeff16962016-05-11 16:36:21 +03003182static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
3183 u8 *mac, int vfid)
3184{
3185 struct qed_vf_info *vf_info;
3186 u64 feature;
3187
3188 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3189 if (!vf_info) {
3190 DP_NOTICE(p_hwfn->cdev,
3191 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
3192 return;
3193 }
3194
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003195 if (vf_info->b_malicious) {
3196 DP_NOTICE(p_hwfn->cdev,
3197 "Can't set forced MAC to malicious VF [%d]\n", vfid);
3198 return;
3199 }
3200
Yuval Mintzeff16962016-05-11 16:36:21 +03003201 feature = 1 << MAC_ADDR_FORCED;
3202 memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
3203
3204 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3205 /* Forced MAC will disable MAC_ADDR */
Yuval Mintz1a635e42016-08-15 10:42:43 +03003206 vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
Yuval Mintzeff16962016-05-11 16:36:21 +03003207
3208 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3209}
3210
Baoyou Xieba569472016-09-09 09:21:15 +08003211static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
3212 u16 pvid, int vfid)
Yuval Mintz08feecd2016-05-11 16:36:20 +03003213{
3214 struct qed_vf_info *vf_info;
3215 u64 feature;
3216
3217 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3218 if (!vf_info) {
3219 DP_NOTICE(p_hwfn->cdev,
 3220			  "Can not set forced VLAN, invalid vfid [%d]\n", vfid);
3221 return;
3222 }
3223
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003224 if (vf_info->b_malicious) {
3225 DP_NOTICE(p_hwfn->cdev,
3226 "Can't set forced vlan to malicious VF [%d]\n", vfid);
3227 return;
3228 }
3229
Yuval Mintz08feecd2016-05-11 16:36:20 +03003230 feature = 1 << VLAN_ADDR_FORCED;
3231 vf_info->bulletin.p_virt->pvid = pvid;
3232 if (pvid)
3233 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3234 else
3235 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
3236
3237 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3238}
3239
Yuval Mintz6ddc7602016-05-11 16:36:23 +03003240static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
3241{
3242 struct qed_vf_info *p_vf_info;
3243
3244 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3245 if (!p_vf_info)
3246 return false;
3247
3248 return !!p_vf_info->vport_instance;
3249}
3250
Baoyou Xieba569472016-09-09 09:21:15 +08003251static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
Yuval Mintz0b55e272016-05-11 16:36:15 +03003252{
3253 struct qed_vf_info *p_vf_info;
3254
3255 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3256 if (!p_vf_info)
3257 return true;
3258
3259 return p_vf_info->state == VF_STOPPED;
3260}
3261
Yuval Mintz73390ac2016-05-11 16:36:24 +03003262static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
3263{
3264 struct qed_vf_info *vf_info;
3265
3266 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3267 if (!vf_info)
3268 return false;
3269
3270 return vf_info->spoof_chk;
3271}
3272
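/* Configure spoof checking for a VF. If the VF has no vport yet, the
 * requested value is latched and applied once the vport is started.
 */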
Baoyou Xieba569472016-09-09 09:21:15 +08003273static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
Yuval Mintz6ddc7602016-05-11 16:36:23 +03003274{
3275 struct qed_vf_info *vf;
3276 int rc = -EINVAL;
3277
3278 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3279 DP_NOTICE(p_hwfn,
3280 "SR-IOV sanity check failed, can't set spoofchk\n");
3281 goto out;
3282 }
3283
3284 vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3285 if (!vf)
3286 goto out;
3287
3288 if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
3289 /* After VF VPORT start PF will configure spoof check */
3290 vf->req_spoofchk_val = val;
3291 rc = 0;
3292 goto out;
3293 }
3294
3295 rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
3296
3297out:
3298 return rc;
3299}
3300
Yuval Mintzeff16962016-05-11 16:36:21 +03003301static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
3302 u16 rel_vf_id)
3303{
3304 struct qed_vf_info *p_vf;
3305
3306 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3307 if (!p_vf || !p_vf->bulletin.p_virt)
3308 return NULL;
3309
Yuval Mintz1a635e42016-08-15 10:42:43 +03003310 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
Yuval Mintzeff16962016-05-11 16:36:21 +03003311 return NULL;
3312
3313 return p_vf->bulletin.p_virt->mac;
3314}
3315
Baoyou Xieba569472016-09-09 09:21:15 +08003316static u16
3317qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
Yuval Mintz08feecd2016-05-11 16:36:20 +03003318{
3319 struct qed_vf_info *p_vf;
3320
3321 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3322 if (!p_vf || !p_vf->bulletin.p_virt)
3323 return 0;
3324
Yuval Mintz1a635e42016-08-15 10:42:43 +03003325 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
Yuval Mintz08feecd2016-05-11 16:36:20 +03003326 return 0;
3327
3328 return p_vf->bulletin.p_virt->pvid;
3329}
3330
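/* Program the HW rate limiter of the VF's vport to the given Tx rate. */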
Yuval Mintz733def62016-05-11 16:36:22 +03003331static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
3332 struct qed_ptt *p_ptt, int vfid, int val)
3333{
3334 struct qed_vf_info *vf;
3335 u8 abs_vp_id = 0;
3336 int rc;
3337
3338 vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3339 if (!vf)
3340 return -EINVAL;
3341
3342 rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
3343 if (rc)
3344 return rc;
3345
3346 return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
3347}
3348
Baoyou Xieba569472016-09-09 09:21:15 +08003349static int
3350qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
Yuval Mintz733def62016-05-11 16:36:22 +03003351{
3352 struct qed_vf_info *vf;
3353 u8 vport_id;
3354 int i;
3355
3356 for_each_hwfn(cdev, i) {
3357 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3358
3359 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3360 DP_NOTICE(p_hwfn,
3361 "SR-IOV sanity check failed, can't set min rate\n");
3362 return -EINVAL;
3363 }
3364 }
3365
3366 vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
3367 vport_id = vf->vport_id;
3368
3369 return qed_configure_vport_wfq(cdev, vport_id, rate);
3370}
3371
Yuval Mintz73390ac2016-05-11 16:36:24 +03003372static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
3373{
3374 struct qed_wfq_data *vf_vp_wfq;
3375 struct qed_vf_info *vf_info;
3376
3377 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3378 if (!vf_info)
3379 return 0;
3380
3381 vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
3382
3383 if (vf_vp_wfq->configured)
3384 return vf_vp_wfq->min_speed;
3385 else
3386 return 0;
3387}
3388
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003389/**
3390 * qed_schedule_iov - schedules IOV task for VF and PF
3391 * @hwfn: hardware function pointer
3392 * @flag: IOV flag for VF/PF
3393 */
3394void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
3395{
3396 smp_mb__before_atomic();
3397 set_bit(flag, &hwfn->iov_task_flags);
3398 smp_mb__after_atomic();
3399 DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
3400 queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
3401}
3402
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03003403void qed_vf_start_iov_wq(struct qed_dev *cdev)
3404{
3405 int i;
3406
3407 for_each_hwfn(cdev, i)
3408 queue_delayed_work(cdev->hwfns[i].iov_wq,
3409 &cdev->hwfns[i].iov_task, 0);
3410}
3411
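/* Tear down SR-IOV: flush the IOV workqueues, mark all VFs disabled,
 * disable the PCI VFs when requested, then wait for each VF to stop and
 * release its HW resources.
 */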
Yuval Mintz0b55e272016-05-11 16:36:15 +03003412int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
3413{
3414 int i, j;
3415
3416 for_each_hwfn(cdev, i)
3417 if (cdev->hwfns[i].iov_wq)
3418 flush_workqueue(cdev->hwfns[i].iov_wq);
3419
3420 /* Mark VFs for disablement */
3421 qed_iov_set_vfs_to_disable(cdev, true);
3422
3423 if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
3424 pci_disable_sriov(cdev->pdev);
3425
3426 for_each_hwfn(cdev, i) {
3427 struct qed_hwfn *hwfn = &cdev->hwfns[i];
3428 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
3429
3430 /* Failure to acquire the ptt in 100g creates an odd error
3431 * where the first engine has already released IOV.
3432 */
3433 if (!ptt) {
3434 DP_ERR(hwfn, "Failed to acquire ptt\n");
3435 return -EBUSY;
3436 }
3437
Yuval Mintz733def62016-05-11 16:36:22 +03003438 /* Clean WFQ db and configure equal weight for all vports */
3439 qed_clean_wfq_db(hwfn, ptt);
3440
Yuval Mintz0b55e272016-05-11 16:36:15 +03003441 qed_for_each_vf(hwfn, j) {
3442 int k;
3443
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003444 if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
Yuval Mintz0b55e272016-05-11 16:36:15 +03003445 continue;
3446
3447 /* Wait until VF is disabled before releasing */
3448 for (k = 0; k < 100; k++) {
3449 if (!qed_iov_is_vf_stopped(hwfn, j))
3450 msleep(20);
3451 else
3452 break;
3453 }
3454
3455 if (k < 100)
3456 qed_iov_release_hw_for_vf(&cdev->hwfns[i],
3457 ptt, j);
3458 else
3459 DP_ERR(hwfn,
3460 "Timeout waiting for VF's FLR to end\n");
3461 }
3462
3463 qed_ptt_release(hwfn, ptt);
3464 }
3465
3466 qed_iov_set_vfs_to_disable(cdev, false);
3467
3468 return 0;
3469}
3470
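/* Bring up SR-IOV: initialize HW resources for each VF (at most 16 L2
 * queues per VF) and then enable the PCI virtual functions.
 */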
3471static int qed_sriov_enable(struct qed_dev *cdev, int num)
3472{
Yuval Mintz0b55e272016-05-11 16:36:15 +03003473 int i, j, rc;
3474
3475 if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
3476 DP_NOTICE(cdev, "Can start at most %d VFs\n",
3477 RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
3478 return -EINVAL;
3479 }
3480
3481 /* Initialize HW for VF access */
3482 for_each_hwfn(cdev, j) {
3483 struct qed_hwfn *hwfn = &cdev->hwfns[j];
3484 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
Mintz, Yuval5a1f9652016-10-31 07:14:26 +02003485 int num_queues;
3486
3487 /* Make sure not to use more than 16 queues per VF */
3488 num_queues = min_t(int,
3489 FEAT_NUM(hwfn, QED_VF_L2_QUE) / num, 16);
Yuval Mintz0b55e272016-05-11 16:36:15 +03003490
3491 if (!ptt) {
3492 DP_ERR(hwfn, "Failed to acquire ptt\n");
3493 rc = -EBUSY;
3494 goto err;
3495 }
3496
Yuval Mintz0b55e272016-05-11 16:36:15 +03003497 for (i = 0; i < num; i++) {
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003498 if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
Yuval Mintz0b55e272016-05-11 16:36:15 +03003499 continue;
3500
Mintz, Yuval5a1f9652016-10-31 07:14:26 +02003501 rc = qed_iov_init_hw_for_vf(hwfn, ptt, i, num_queues);
Yuval Mintz0b55e272016-05-11 16:36:15 +03003502 if (rc) {
3503 DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
3504 qed_ptt_release(hwfn, ptt);
3505 goto err;
3506 }
3507 }
3508
3509 qed_ptt_release(hwfn, ptt);
3510 }
3511
3512 /* Enable SRIOV PCIe functions */
3513 rc = pci_enable_sriov(cdev->pdev, num);
3514 if (rc) {
3515 DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
3516 goto err;
3517 }
3518
3519 return num;
3520
3521err:
3522 qed_sriov_disable(cdev, false);
3523 return rc;
3524}
3525
3526static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
3527{
3528 if (!IS_QED_SRIOV(cdev)) {
3529 DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
3530 return -EOPNOTSUPP;
3531 }
3532
3533 if (num_vfs_param)
3534 return qed_sriov_enable(cdev, num_vfs_param);
3535 else
3536 return qed_sriov_disable(cdev, true);
3537}
3538
Yuval Mintzeff16962016-05-11 16:36:21 +03003539static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
3540{
3541 int i;
3542
3543 if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
3544 DP_VERBOSE(cdev, QED_MSG_IOV,
3545 "Cannot set a VF MAC; Sriov is not enabled\n");
3546 return -EINVAL;
3547 }
3548
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003549 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
Yuval Mintzeff16962016-05-11 16:36:21 +03003550 DP_VERBOSE(cdev, QED_MSG_IOV,
3551 "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
3552 return -EINVAL;
3553 }
3554
3555 for_each_hwfn(cdev, i) {
3556 struct qed_hwfn *hwfn = &cdev->hwfns[i];
3557 struct qed_public_vf_info *vf_info;
3558
3559 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
3560 if (!vf_info)
3561 continue;
3562
3563 /* Set the forced MAC, and schedule the IOV task */
3564 ether_addr_copy(vf_info->forced_mac, mac);
3565 qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
3566 }
3567
3568 return 0;
3569}
3570
Yuval Mintz08feecd2016-05-11 16:36:20 +03003571static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
3572{
3573 int i;
3574
3575 if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
3576 DP_VERBOSE(cdev, QED_MSG_IOV,
3577 "Cannot set a VF MAC; Sriov is not enabled\n");
3578 return -EINVAL;
3579 }
3580
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003581 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
Yuval Mintz08feecd2016-05-11 16:36:20 +03003582 DP_VERBOSE(cdev, QED_MSG_IOV,
3583 "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
3584 return -EINVAL;
3585 }
3586
3587 for_each_hwfn(cdev, i) {
3588 struct qed_hwfn *hwfn = &cdev->hwfns[i];
3589 struct qed_public_vf_info *vf_info;
3590
3591 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
3592 if (!vf_info)
3593 continue;
3594
3595 /* Set the forced vlan, and schedule the IOV task */
3596 vf_info->forced_vlan = vid;
3597 qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
3598 }
3599
3600 return 0;
3601}
3602
Yuval Mintz73390ac2016-05-11 16:36:24 +03003603static int qed_get_vf_config(struct qed_dev *cdev,
3604 int vf_id, struct ifla_vf_info *ivi)
3605{
3606 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
3607 struct qed_public_vf_info *vf_info;
3608 struct qed_mcp_link_state link;
3609 u32 tx_rate;
3610
3611 /* Sanitize request */
3612 if (IS_VF(cdev))
3613 return -EINVAL;
3614
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003615 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
Yuval Mintz73390ac2016-05-11 16:36:24 +03003616 DP_VERBOSE(cdev, QED_MSG_IOV,
3617 "VF index [%d] isn't active\n", vf_id);
3618 return -EINVAL;
3619 }
3620
3621 vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
3622
3623 qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
3624
3625 /* Fill information about VF */
3626 ivi->vf = vf_id;
3627
3628 if (is_valid_ether_addr(vf_info->forced_mac))
3629 ether_addr_copy(ivi->mac, vf_info->forced_mac);
3630 else
3631 ether_addr_copy(ivi->mac, vf_info->mac);
3632
3633 ivi->vlan = vf_info->forced_vlan;
3634 ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
3635 ivi->linkstate = vf_info->link_state;
3636 tx_rate = vf_info->tx_rate;
3637 ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
3638 ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
3639
3640 return 0;
3641}
3642
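/* Propagate the PF's link state to all VFs' bulletin boards, adjusted per
 * VF by its administratively configured link state and Tx rate.
 */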
Yuval Mintz36558c32016-05-11 16:36:17 +03003643void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
3644{
3645 struct qed_mcp_link_capabilities caps;
3646 struct qed_mcp_link_params params;
3647 struct qed_mcp_link_state link;
3648 int i;
3649
3650 if (!hwfn->pf_iov_info)
3651 return;
3652
3653 /* Update bulletin of all future possible VFs with link configuration */
3654 for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
Yuval Mintz733def62016-05-11 16:36:22 +03003655 struct qed_public_vf_info *vf_info;
3656
3657 vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
3658 if (!vf_info)
3659 continue;
3660
Yuval Mintz36558c32016-05-11 16:36:17 +03003661 memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
3662 memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
3663 memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
3664 sizeof(caps));
3665
Yuval Mintz733def62016-05-11 16:36:22 +03003666 /* Modify link according to the VF's configured link state */
3667 switch (vf_info->link_state) {
3668 case IFLA_VF_LINK_STATE_DISABLE:
3669 link.link_up = false;
3670 break;
3671 case IFLA_VF_LINK_STATE_ENABLE:
3672 link.link_up = true;
3673 /* Set speed according to the maximum supported by HW:
3674 * 40G for regular devices and 100G for CMT-mode devices.
3675 */
3676 link.speed = (hwfn->cdev->num_hwfns > 1) ?
3677 100000 : 40000;
3678 break;
3679 default:
3680 /* In auto mode pass PF link image to VF */
3681 break;
3682 }
3683
3684 if (link.link_up && vf_info->tx_rate) {
3685 struct qed_ptt *ptt;
3686 int rate;
3687
3688 rate = min_t(int, vf_info->tx_rate, link.speed);
3689
3690 ptt = qed_ptt_acquire(hwfn);
3691 if (!ptt) {
3692 DP_NOTICE(hwfn, "Failed to acquire PTT\n");
3693 return;
3694 }
3695
3696 if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
3697 vf_info->tx_rate = rate;
3698 link.speed = rate;
3699 }
3700
3701 qed_ptt_release(hwfn, ptt);
3702 }
3703
Yuval Mintz36558c32016-05-11 16:36:17 +03003704 qed_iov_set_link(hwfn, i, &params, &link, &caps);
3705 }
3706
3707 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
3708}
3709
Yuval Mintz733def62016-05-11 16:36:22 +03003710static int qed_set_vf_link_state(struct qed_dev *cdev,
3711 int vf_id, int link_state)
3712{
3713 int i;
3714
3715 /* Sanitize request */
3716 if (IS_VF(cdev))
3717 return -EINVAL;
3718
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003719 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
Yuval Mintz733def62016-05-11 16:36:22 +03003720 DP_VERBOSE(cdev, QED_MSG_IOV,
3721 "VF index [%d] isn't active\n", vf_id);
3722 return -EINVAL;
3723 }
3724
3725 /* Handle configuration of link state */
3726 for_each_hwfn(cdev, i) {
3727 struct qed_hwfn *hwfn = &cdev->hwfns[i];
3728 struct qed_public_vf_info *vf;
3729
3730 vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
3731 if (!vf)
3732 continue;
3733
3734 if (vf->link_state == link_state)
3735 continue;
3736
3737 vf->link_state = link_state;
3738 qed_inform_vf_link_state(&cdev->hwfns[i]);
3739 }
3740
3741 return 0;
3742}
3743
Yuval Mintz6ddc7602016-05-11 16:36:23 +03003744static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
3745{
3746 int i, rc = -EINVAL;
3747
3748 for_each_hwfn(cdev, i) {
3749 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3750
3751 rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
3752 if (rc)
3753 break;
3754 }
3755
3756 return rc;
3757}
3758
Yuval Mintz733def62016-05-11 16:36:22 +03003759static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
3760{
3761 int i;
3762
3763 for_each_hwfn(cdev, i) {
3764 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3765 struct qed_public_vf_info *vf;
3766
3767 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3768 DP_NOTICE(p_hwfn,
3769 "SR-IOV sanity check failed, can't set tx rate\n");
3770 return -EINVAL;
3771 }
3772
3773 vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
3774
3775 vf->tx_rate = rate;
3776
3777 qed_inform_vf_link_state(p_hwfn);
3778 }
3779
3780 return 0;
3781}
3782
3783static int qed_set_vf_rate(struct qed_dev *cdev,
3784 int vfid, u32 min_rate, u32 max_rate)
3785{
3786 int rc_min = 0, rc_max = 0;
3787
3788 if (max_rate)
3789 rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
3790
3791 if (min_rate)
3792 rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
3793
3794 if (rc_max | rc_min)
3795 return -EINVAL;
3796
3797 return 0;
3798}
3799
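/* Service pending VF->PF mailbox requests: collect the pending-events
 * bitmaps, copy each VF's request into the PF buffer and process it.
 */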
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003800static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
3801{
3802 u64 events[QED_VF_ARRAY_LENGTH];
3803 struct qed_ptt *ptt;
3804 int i;
3805
3806 ptt = qed_ptt_acquire(hwfn);
3807 if (!ptt) {
3808 DP_VERBOSE(hwfn, QED_MSG_IOV,
3809 "Can't acquire PTT; re-scheduling\n");
3810 qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
3811 return;
3812 }
3813
3814 qed_iov_pf_get_and_clear_pending_events(hwfn, events);
3815
3816 DP_VERBOSE(hwfn, QED_MSG_IOV,
3817 "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
3818 events[0], events[1], events[2]);
3819
3820 qed_for_each_vf(hwfn, i) {
3821 /* Skip VFs with no pending messages */
3822 if (!(events[i / 64] & (1ULL << (i % 64))))
3823 continue;
3824
3825 DP_VERBOSE(hwfn, QED_MSG_IOV,
3826 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
3827 i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
3828
3829 /* Copy VF's message to PF's request buffer for that VF */
3830 if (qed_iov_copy_vf_msg(hwfn, ptt, i))
3831 continue;
3832
3833 qed_iov_process_mbx_req(hwfn, ptt, i);
3834 }
3835
3836 qed_ptt_release(hwfn, ptt);
3837}
3838
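/* Push PF-configured forced MAC/vlan values into the VF bulletin boards
 * and schedule a bulletin update whenever something changed.
 */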
Yuval Mintz08feecd2016-05-11 16:36:20 +03003839static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
3840{
3841 int i;
3842
3843 qed_for_each_vf(hwfn, i) {
3844 struct qed_public_vf_info *info;
3845 bool update = false;
Yuval Mintzeff16962016-05-11 16:36:21 +03003846 u8 *mac;
Yuval Mintz08feecd2016-05-11 16:36:20 +03003847
3848 info = qed_iov_get_public_vf_info(hwfn, i, true);
3849 if (!info)
3850 continue;
3851
3852 /* Update data on bulletin board */
Yuval Mintzeff16962016-05-11 16:36:21 +03003853 mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
3854 if (is_valid_ether_addr(info->forced_mac) &&
3855 (!mac || !ether_addr_equal(mac, info->forced_mac))) {
3856 DP_VERBOSE(hwfn,
3857 QED_MSG_IOV,
3858 "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
3859 i,
3860 hwfn->cdev->p_iov_info->first_vf_in_pf + i);
3861
3862 /* Update bulletin board with forced MAC */
3863 qed_iov_bulletin_set_forced_mac(hwfn,
3864 info->forced_mac, i);
3865 update = true;
3866 }
Yuval Mintz08feecd2016-05-11 16:36:20 +03003867
3868 if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
3869 info->forced_vlan) {
3870 DP_VERBOSE(hwfn,
3871 QED_MSG_IOV,
3872 "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
3873 info->forced_vlan,
3874 i,
3875 hwfn->cdev->p_iov_info->first_vf_in_pf + i);
3876 qed_iov_bulletin_set_forced_vlan(hwfn,
3877 info->forced_vlan, i);
3878 update = true;
3879 }
3880
3881 if (update)
3882 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
3883 }
3884}
3885
Yuval Mintz36558c32016-05-11 16:36:17 +03003886static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
3887{
3888 struct qed_ptt *ptt;
3889 int i;
3890
3891 ptt = qed_ptt_acquire(hwfn);
3892 if (!ptt) {
3893 DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
3894 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
3895 return;
3896 }
3897
3898 qed_for_each_vf(hwfn, i)
3899 qed_iov_post_vf_bulletin(hwfn, i, ptt);
3900
3901 qed_ptt_release(hwfn, ptt);
3902}
3903
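/* Worker for the per-hwfn IOV workqueue: handles FLR cleanup, VF mailbox
 * messages, forced unicast configuration and bulletin posting.
 */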
Baoyou Xieba569472016-09-09 09:21:15 +08003904static void qed_iov_pf_task(struct work_struct *work)
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003906{
3907 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
3908 iov_task.work);
Yuval Mintz0b55e272016-05-11 16:36:15 +03003909 int rc;
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003910
3911 if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
3912 return;
3913
Yuval Mintz0b55e272016-05-11 16:36:15 +03003914 if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
3915 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
3916
3917 if (!ptt) {
3918 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
3919 return;
3920 }
3921
3922 rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
3923 if (rc)
3924 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
3925
3926 qed_ptt_release(hwfn, ptt);
3927 }
3928
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003929 if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
3930 qed_handle_vf_msg(hwfn);
Yuval Mintz08feecd2016-05-11 16:36:20 +03003931
3932 if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
3933 &hwfn->iov_task_flags))
3934 qed_handle_pf_set_vf_unicast(hwfn);
3935
Yuval Mintz36558c32016-05-11 16:36:17 +03003936 if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
3937 &hwfn->iov_task_flags))
3938 qed_handle_bulletin_post(hwfn);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003939}
3940
3941void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
3942{
3943 int i;
3944
3945 for_each_hwfn(cdev, i) {
3946 if (!cdev->hwfns[i].iov_wq)
3947 continue;
3948
3949 if (schedule_first) {
3950 qed_schedule_iov(&cdev->hwfns[i],
3951 QED_IOV_WQ_STOP_WQ_FLAG);
3952 cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
3953 }
3954
3955 flush_workqueue(cdev->hwfns[i].iov_wq);
3956 destroy_workqueue(cdev->hwfns[i].iov_wq);
3957 }
3958}
3959
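/* Allocate a per-hwfn IOV workqueue and initialize the delayed work; PFs
 * get one only when they support SR-IOV, VFs always get one.
 */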
3960int qed_iov_wq_start(struct qed_dev *cdev)
3961{
3962 char name[NAME_SIZE];
3963 int i;
3964
3965 for_each_hwfn(cdev, i) {
3966 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3967
Yuval Mintz36558c32016-05-11 16:36:17 +03003968 /* PFs need a dedicated workqueue only if they support IOV.
3969 * VFs always require one.
3970 */
3971 if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003972 continue;
3973
3974 snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
3975 cdev->pdev->bus->number,
3976 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
3977
3978 p_hwfn->iov_wq = create_singlethread_workqueue(name);
3979 if (!p_hwfn->iov_wq) {
3980 DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
3981 return -ENOMEM;
3982 }
3983
Yuval Mintz36558c32016-05-11 16:36:17 +03003984 if (IS_PF(cdev))
3985 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
3986 else
3987 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003988 }
3989
3990 return 0;
3991}
Yuval Mintz0b55e272016-05-11 16:36:15 +03003992
3993const struct qed_iov_hv_ops qed_iov_ops_pass = {
3994 .configure = &qed_sriov_configure,
Yuval Mintzeff16962016-05-11 16:36:21 +03003995 .set_mac = &qed_sriov_pf_set_mac,
Yuval Mintz08feecd2016-05-11 16:36:20 +03003996 .set_vlan = &qed_sriov_pf_set_vlan,
Yuval Mintz73390ac2016-05-11 16:36:24 +03003997 .get_config = &qed_get_vf_config,
Yuval Mintz733def62016-05-11 16:36:22 +03003998 .set_link_state = &qed_set_vf_link_state,
Yuval Mintz6ddc7602016-05-11 16:36:23 +03003999 .set_spoof = &qed_spoof_configure,
Yuval Mintz733def62016-05-11 16:36:22 +03004000 .set_rate = &qed_set_vf_rate,
Yuval Mintz0b55e272016-05-11 16:36:15 +03004001};
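/* Illustrative usage (not part of this file): the qede netdev driver is
 * expected to reach these handlers through its qed_eth_ops::iov table.
 * A hypothetical .ndo_set_vf_mac glue could look roughly like this:
 *
 *	static int qede_set_vf_mac(struct net_device *ndev, int vfid, u8 *mac)
 *	{
 *		struct qede_dev *edev = netdev_priv(ndev);
 *
 *		return edev->ops->iov->set_mac(edev->cdev, mac, vfid);
 *	}
 */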