/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

/* IOV ramrods */
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 fp_minor;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_ETH_ROCE:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		return -EINVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR,
			   fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
			  u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id, bool b_enabled_only)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}

static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf, u16 rx_qid)
{
	if (rx_qid >= p_vf->num_rxqs)
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
	return rx_qid < p_vf->num_rxqs;
}

static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf, u16 tx_qid)
{
	if (tx_qid >= p_vf->num_txqs)
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
	return tx_qid < p_vf->num_txqs;
}

static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *p_vf, u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}

int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
			     int vfid, struct qed_ptt *p_ptt)
{
	struct qed_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct qed_dmae_params params;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return -EINVAL;

	if (!p_vf->vf_bulletin)
		return -EINVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
				p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	memset(&params, 0, sizeof(params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
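	/* Note: the length passed to the DMAE copy below appears to be
	 * expressed in 32-bit dwords, which is why the bulletin size is
	 * divided by 4.
	 */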
	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				  &params);
}

static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs is already set to a non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of vfs set: %d; setting num_vfs to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return 0;
}

static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct qed_igu_block *p_sb;
	u16 sb_id;
	u32 val;

	if (!p_hwfn->hw_info.p_igu_info) {
		DP_ERR(p_hwfn,
		       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
		return;
	}

	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     sb_id++) {
		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
		if ((p_sb->status & QED_IGU_STATUS_FREE) &&
		    !(p_sb->status & QED_IGU_STATUS_PF)) {
			val = qed_rd(p_hwfn, p_ptt,
				     IGU_REG_MAPPING_MEMORY + sb_id * 4);
			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
		}
	}
}

static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "qed_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;
		vf->b_init = false;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
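		/* The opaque fid carries the parent PF's fid in its low byte
		 * and the VF's absolute index in the byte above it.
		 */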
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;

		vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
	}
}

static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

	return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}

int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
		return -ENOMEM;
	}

	p_hwfn->pf_iov_info = p_sriov;

	return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
	qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}

int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info) {
		DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
		return -ENOMEM;
	}
	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* Calculate the first VF index - this is a bit tricky; Basically,
	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
	 * after the first engine's VFs.
	 */
	cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
					   p_hwfn->abs_pf_id - 16;
	if (QED_PATH_ID(p_hwfn))
		cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
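	/* For example, with the usual VF offset of 16 and PF0 on the first
	 * engine, first_vf_in_pf evaluates to 16 + 0 - 16 = 0, i.e. the
	 * engine's first VF.
	 */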

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
		return false;

	return true;
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
	struct qed_vf_info *vf;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
	u16 i;

	if (!IS_QED_SRIOV(cdev))
		return;

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}

static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, true);
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}

static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc;

	if (vf->to_disable)
		return 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

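	/* Run the init-tool's VF phase for this VF; this is done while still
	 * pretending to the VF's concrete fid, so the writes land in the
	 * VF's context.
	 */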
	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	vf->state = VF_FREE;

	return rc;
}

/**
 * @brief qed_iov_config_perm_table - configure the permission
 *        zone table.
 *        In E4, queue zone permission table size is 320x9. There
 *        are 320 VF queues for single engine device (256 for dual
 *        engine device), and each entry has the following format:
 *        {Valid, VF[7:0]}
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}

static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}

static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *igu_blocks;
	int qid = 0, igu_id = 0;
	u32 val = 0;

	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

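	/* Walk the IGU mapping memory; each free block is handed to the VF
	 * by writing its mapping line with the VF's function number and
	 * vector, and by initializing the matching CAU SB entry via DMAE.
	 */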
	while ((qid < num_rx_queues) &&
	       (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
		if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
			struct cau_sb_entry sb_entry;

			vf->igu_sbs[qid] = (u16)igu_id;
			igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;

			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
			       val);

			/* Configure igu sb in CAU which were marked valid */
			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_hwfn->rel_pf_id,
					      vf->abs_vf_id, 1);
			qed_dmae_host2grc(p_hwfn, p_ptt,
					  (u64)(uintptr_t)&sb_entry,
					  CAU_REG_SB_VAR_MEMORY +
					  igu_id * sizeof(u64), 2, 0);
			qid++;
		}
		igu_id++;
	}

	vf->num_sbs = (u8) num_rx_queues;

	return vf->num_sbs;
}

static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->igu_map.igu_blocks[igu_id].status |=
		    QED_IGU_STATUS_FREE;

		p_hwfn->hw_info.p_igu_info->free_blks++;
	}

	vf->num_sbs = 0;
}

static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u16 rel_vf_id, u16 num_rx_queues)
{
	u8 num_of_vf_available_chains = 0;
	struct qed_vf_info *vf = NULL;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
		return -EINVAL;
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, num_rx_queues, (u16) cids);
	num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));

	num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							      p_ptt,
							      vf,
							      num_rx_queues);
	if (!num_of_vf_available_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
							   vf->igu_sbs[i]);

		if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
			DP_NOTICE(p_hwfn,
				  "VF[%d] would require an out-of-bounds queue - %04x\n",
				  vf->relative_vf_id, queue_id);
			return -EINVAL;
		}

		/* CIDs are per-VF, so no problem having them 0-based. */
		vf->vf_queues[i].fw_rx_qid = queue_id;
		vf->vf_queues[i].fw_tx_qid = queue_id;
		vf->vf_queues[i].fw_cid = i;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
	}
	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}

static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}

static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->bulletin.p_virt)
		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	memset(&vf->acquire, 0, sizeof(vf->acquire));

	/* Disabling interrupts and resetting the permission table were done
	 * during vf-close; however, we could get here without going through
	 * vf_close.
	 */
	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs--;
	}

	return 0;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}

static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

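	/* The reply body is copied first and the leading 8 bytes (which
	 * carry the status) only afterwards, presumably so a VF polling the
	 * status field never observes a partially written response.
	 */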
	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);

	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
}

static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
				enum qed_iov_vport_update_flag flag)
{
	switch (flag) {
	case QED_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case QED_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case QED_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case QED_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case QED_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case QED_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}

static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_iov_vf_mbx *p_mbx,
					    u8 status,
					    u16 tlvs_mask, u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & BIT(i)))
			continue;

		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
				   qed_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & BIT(i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	return total_len;
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
						      u16 relative_vf_id,
						      bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return NULL;

	return &vf->p_vf_info;
}

void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
	struct qed_public_vf_info *vf_info;

	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);

	if (!vf_info)
		return;

	/* Clear the VF mac */
	memset(vf_info->mac, 0, ETH_ALEN);
}

static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf)
{
	u32 i;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
		p_vf->vf_queues[i].rxq_active = 0;

	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}

static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *p_vf,
				      struct vf_pf_resc_request *p_req,
				      struct pf_vf_resc *p_resp)
{
	int i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				(u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
	}

	/* Filter related information */
	p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
					p_req->num_mac_filters);
	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
					 p_req->num_vlan_filters);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = QED_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs,
			   p_resp->num_rxqs,
			   p_req->num_txqs,
			   p_resp->num_txqs,
			   p_req->num_sbs,
			   p_resp->num_sbs,
			   p_req->num_mac_filters,
			   p_resp->num_mac_filters,
			   p_req->num_vlan_filters,
			   p_resp->num_vlan_filters,
			   p_req->num_mc_filters, p_resp->num_mc_filters);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}

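/* Report where the per-VF queue statistics live; judging by the macro names,
 * the addresses below are offsets into the VF's BAR0 view of the storm RAM
 * zones.
 */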
static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
					 struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  offsetof(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  offsetof(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  offsetof(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}

static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	int rc;

	memset(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might later be overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);
		goto out;
	}

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->cdev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support 100g\n",
			vf->abs_vf_id);
		goto out;
	}

	/* Store the acquire message */
	memcpy(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
	pfdev_info->db_size = 0;
	pfdev_info->indices_per_sb = PIS_PER_SB;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->cdev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this field.
	 */
	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
					 req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

	pfdev_info->dev_type = p_hwfn->cdev->type;
	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						  &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = qed_sp_vf_start(p_hwfn, vf);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response */
	resp->bulletin_size = vf->bulletin.size;
	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   vf->abs_vf_id,
		   resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size,
		   resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities,
		   resc->num_rxqs,
		   resc->num_txqs,
		   resc->num_sbs,
		   resc->num_mac_filters,
		   resc->num_vlan_filters);
	vf->state = VF_ACQUIRED;

	/* Prepare Response */
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}

static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
				  struct qed_vf_info *p_vf, bool val)
{
	struct qed_sp_vport_update_params params;
	int rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return 0;
	}

	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
	if (!rc) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}

static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf)
{
	struct qed_filter_ucast filter;
	int rc = 0;
	int i;

	memset(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = QED_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = QED_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}

static int
qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
				   struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;

	if ((events & BIT(VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}

static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
					  struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;
	struct qed_filter_ucast filter;

	if (!p_vf->vport_instance)
		return -EINVAL;

	if (events & BIT(MAC_ADDR_FORCED)) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_MAC;
		filter.opcode = QED_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}

		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
	}

	if (events & BIT(VLAN_ADDR_FORCED)) {
		struct qed_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
					      QED_FILTER_FLUSH;

		/* Send the ramrod */
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		memset(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ? 1
				      : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = qed_sp_vport_update(p_hwfn,
					 &vport_update,
					 QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
			u16 qid;

			if (!p_vf->vf_queues[i].rxq_active)
				continue;

			qid = p_vf->vf_queues[i].fw_rx_qid;

			rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
							 1, 0, 1,
							 QED_SPQ_MODE_EBLOCK,
							 NULL);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "Failed to send Rx update for queue[0x%04x]\n",
					  qid);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}

Yuval Mintzdacd88d2016-05-11 16:36:16 +03001618static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
1619 struct qed_ptt *p_ptt,
1620 struct qed_vf_info *vf)
1621{
1622 struct qed_sp_vport_start_params params = { 0 };
1623 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1624 struct vfpf_vport_start_tlv *start;
1625 u8 status = PFVF_STATUS_SUCCESS;
1626 struct qed_vf_info *vf_info;
Yuval Mintz08feecd2016-05-11 16:36:20 +03001627 u64 *p_bitmap;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001628 int sb_id;
1629 int rc;
1630
1631 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
1632 if (!vf_info) {
1633 DP_NOTICE(p_hwfn->cdev,
1634 "Failed to get VF info, invalid vfid [%d]\n",
1635 vf->relative_vf_id);
1636 return;
1637 }
1638
1639 vf->state = VF_ENABLED;
1640 start = &mbx->req_virt->start_vport;
1641
1642 /* Initialize Status block in CAU */
1643 for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
1644 if (!start->sb_addr[sb_id]) {
1645 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1646 "VF[%d] did not fill the address of SB %d\n",
1647 vf->relative_vf_id, sb_id);
1648 break;
1649 }
1650
1651 qed_int_cau_conf_sb(p_hwfn, p_ptt,
1652 start->sb_addr[sb_id],
Yuval Mintz1a635e42016-08-15 10:42:43 +03001653 vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001654 }
1655 qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
1656
1657 vf->mtu = start->mtu;
Yuval Mintz08feecd2016-05-11 16:36:20 +03001658 vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
1659
 1660	/* Take into consideration any configuration forced by the hypervisor;
 1661	 * if none is configured, use the supplied VF values [old VFs are still
 1662	 * fine, since they passed '0' as padding].
1663 */
1664 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
Yuval Mintz1a635e42016-08-15 10:42:43 +03001665 if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
Yuval Mintz08feecd2016-05-11 16:36:20 +03001666 u8 vf_req = start->only_untagged;
1667
1668 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
1669 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
1670 }
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001671
1672 params.tpa_mode = start->tpa_mode;
1673 params.remove_inner_vlan = start->inner_vlan_removal;
Yuval Mintz831bfb0e2016-05-11 16:36:25 +03001674 params.tx_switching = true;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001675
Yuval Mintz08feecd2016-05-11 16:36:20 +03001676 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001677 params.drop_ttl0 = false;
1678 params.concrete_fid = vf->concrete_fid;
1679 params.opaque_fid = vf->opaque_fid;
1680 params.vport_id = vf->vport_id;
1681 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1682 params.mtu = vf->mtu;
Yuval Mintz11a85d72016-08-22 13:25:10 +03001683 params.check_mac = true;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001684
1685 rc = qed_sp_eth_vport_start(p_hwfn, &params);
Yuval Mintz1a635e42016-08-15 10:42:43 +03001686 if (rc) {
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001687 DP_ERR(p_hwfn,
1688 "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
1689 status = PFVF_STATUS_FAILURE;
1690 } else {
1691 vf->vport_instance++;
Yuval Mintz08feecd2016-05-11 16:36:20 +03001692
1693 /* Force configuration if needed on the newly opened vport */
1694 qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
Yuval Mintz6ddc7602016-05-11 16:36:23 +03001695
1696 __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001697 }
1698 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
1699 sizeof(struct pfvf_def_resp_tlv), status);
1700}
1701
1702static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
1703 struct qed_ptt *p_ptt,
1704 struct qed_vf_info *vf)
1705{
1706 u8 status = PFVF_STATUS_SUCCESS;
1707 int rc;
1708
1709 vf->vport_instance--;
Yuval Mintz6ddc7602016-05-11 16:36:23 +03001710 vf->spoof_chk = false;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001711
1712 rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
Yuval Mintz1a635e42016-08-15 10:42:43 +03001713 if (rc) {
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001714 DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
1715 rc);
1716 status = PFVF_STATUS_FAILURE;
1717 }
1718
Yuval Mintz08feecd2016-05-11 16:36:20 +03001719 /* Forget the configuration on the vport */
1720 vf->configured_features = 0;
1721 memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
1722
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001723 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
1724 sizeof(struct pfvf_def_resp_tlv), status);
1725}
1726
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001727static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
1728 struct qed_ptt *p_ptt,
Yuval Mintza044df82016-08-22 13:25:09 +03001729 struct qed_vf_info *vf,
1730 u8 status, bool b_legacy)
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001731{
1732 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1733 struct pfvf_start_queue_resp_tlv *p_tlv;
1734 struct vfpf_start_rxq_tlv *req;
Yuval Mintza044df82016-08-22 13:25:09 +03001735 u16 length;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001736
1737 mbx->offset = (u8 *)mbx->reply_virt;
1738
Yuval Mintza044df82016-08-22 13:25:09 +03001739	/* Taking a bigger struct instead of adding a TLV to the list was a
1740 * mistake, but one which we're now stuck with, as some older
1741 * clients assume the size of the previous response.
1742 */
1743 if (!b_legacy)
1744 length = sizeof(*p_tlv);
1745 else
1746 length = sizeof(struct pfvf_def_resp_tlv);
1747
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001748 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
Yuval Mintza044df82016-08-22 13:25:09 +03001749 length);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001750 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1751 sizeof(struct channel_list_end_tlv));
1752
1753 /* Update the TLV with the response */
Yuval Mintza044df82016-08-22 13:25:09 +03001754 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001755 req = &mbx->req_virt->start_rxq;
Yuval Mintz351a4ded2016-06-02 10:23:29 +03001756 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
1757 offsetof(struct mstorm_vf_zone,
1758 non_trigger.eth_rx_queue_producers) +
1759 sizeof(struct eth_rx_prod_data) * req->rx_qid;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001760 }
1761
Yuval Mintza044df82016-08-22 13:25:09 +03001762 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001763}
1764
1765static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
1766 struct qed_ptt *p_ptt,
1767 struct qed_vf_info *vf)
1768{
1769 struct qed_queue_start_common_params params;
1770 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
Yuval Mintz41086462016-06-05 13:11:13 +03001771 u8 status = PFVF_STATUS_NO_RESOURCE;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001772 struct vfpf_start_rxq_tlv *req;
Yuval Mintza044df82016-08-22 13:25:09 +03001773 bool b_legacy_vf = false;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001774 int rc;
1775
1776 memset(&params, 0, sizeof(params));
1777 req = &mbx->req_virt->start_rxq;
Yuval Mintz41086462016-06-05 13:11:13 +03001778
1779 if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
1780 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
1781 goto out;
1782
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001783 params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
Yuval Mintz351a4ded2016-06-02 10:23:29 +03001784 params.vf_qid = req->rx_qid;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001785 params.vport_id = vf->vport_id;
1786 params.sb = req->hw_sb;
1787 params.sb_idx = req->sb_index;
1788
Yuval Mintza044df82016-08-22 13:25:09 +03001789	/* Legacy VFs have their producers in a different location, which they
 1790	 * calculate on their own and clear prior to this.
1791 */
1792 if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1793 ETH_HSI_VER_NO_PKT_LEN_TUNN) {
1794 b_legacy_vf = true;
1795 } else {
1796 REG_WR(p_hwfn,
1797 GTT_BAR0_MAP_REG_MSDM_RAM +
1798 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
1799 0);
1800 }
1801
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001802 rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
1803 vf->vf_queues[req->rx_qid].fw_cid,
1804 &params,
1805 vf->abs_vf_id + 0x10,
1806 req->bd_max_bytes,
1807 req->rxq_addr,
Yuval Mintza044df82016-08-22 13:25:09 +03001808 req->cqe_pbl_addr, req->cqe_pbl_size,
1809 b_legacy_vf);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001810
1811 if (rc) {
1812 status = PFVF_STATUS_FAILURE;
1813 } else {
Yuval Mintz41086462016-06-05 13:11:13 +03001814 status = PFVF_STATUS_SUCCESS;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001815 vf->vf_queues[req->rx_qid].rxq_active = true;
1816 vf->num_active_rxqs++;
1817 }
1818
Yuval Mintz41086462016-06-05 13:11:13 +03001819out:
Yuval Mintza044df82016-08-22 13:25:09 +03001820 qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001821}
1822
Yuval Mintz5040acf2016-06-05 13:11:14 +03001823static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
1824 struct qed_ptt *p_ptt,
1825 struct qed_vf_info *p_vf, u8 status)
1826{
1827 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1828 struct pfvf_start_queue_resp_tlv *p_tlv;
Yuval Mintza044df82016-08-22 13:25:09 +03001829 bool b_legacy = false;
1830 u16 length;
Yuval Mintz5040acf2016-06-05 13:11:14 +03001831
1832 mbx->offset = (u8 *)mbx->reply_virt;
1833
Yuval Mintza044df82016-08-22 13:25:09 +03001834	/* Taking a bigger struct instead of adding a TLV to the list was a
1835 * mistake, but one which we're now stuck with, as some older
1836 * clients assume the size of the previous response.
1837 */
1838 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1839 ETH_HSI_VER_NO_PKT_LEN_TUNN)
1840 b_legacy = true;
1841
1842 if (!b_legacy)
1843 length = sizeof(*p_tlv);
1844 else
1845 length = sizeof(struct pfvf_def_resp_tlv);
1846
Yuval Mintz5040acf2016-06-05 13:11:14 +03001847 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
Yuval Mintza044df82016-08-22 13:25:09 +03001848 length);
Yuval Mintz5040acf2016-06-05 13:11:14 +03001849 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1850 sizeof(struct channel_list_end_tlv));
1851
1852 /* Update the TLV with the response */
Yuval Mintza044df82016-08-22 13:25:09 +03001853 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
Yuval Mintz5040acf2016-06-05 13:11:14 +03001854 u16 qid = mbx->req_virt->start_txq.tx_qid;
1855
1856 p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid,
1857 DQ_DEMS_LEGACY);
1858 }
1859
Yuval Mintza044df82016-08-22 13:25:09 +03001860 qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
Yuval Mintz5040acf2016-06-05 13:11:14 +03001861}
1862
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001863static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
1864 struct qed_ptt *p_ptt,
1865 struct qed_vf_info *vf)
1866{
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001867 struct qed_queue_start_common_params params;
1868 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
Yuval Mintz41086462016-06-05 13:11:13 +03001869 u8 status = PFVF_STATUS_NO_RESOURCE;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001870 union qed_qm_pq_params pq_params;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001871 struct vfpf_start_txq_tlv *req;
1872 int rc;
1873
1874 /* Prepare the parameters which would choose the right PQ */
1875 memset(&pq_params, 0, sizeof(pq_params));
1876 pq_params.eth.is_vf = 1;
1877 pq_params.eth.vf_id = vf->relative_vf_id;
1878
1879 memset(&params, 0, sizeof(params));
1880 req = &mbx->req_virt->start_txq;
Yuval Mintz41086462016-06-05 13:11:13 +03001881
1882 if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
1883 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
1884 goto out;
1885
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001886 params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
1887 params.vport_id = vf->vport_id;
1888 params.sb = req->hw_sb;
1889 params.sb_idx = req->sb_index;
1890
1891 rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
1892 vf->opaque_fid,
1893 vf->vf_queues[req->tx_qid].fw_cid,
1894 &params,
1895 vf->abs_vf_id + 0x10,
1896 req->pbl_addr,
1897 req->pbl_size, &pq_params);
1898
Yuval Mintz41086462016-06-05 13:11:13 +03001899 if (rc) {
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001900 status = PFVF_STATUS_FAILURE;
Yuval Mintz41086462016-06-05 13:11:13 +03001901 } else {
1902 status = PFVF_STATUS_SUCCESS;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001903 vf->vf_queues[req->tx_qid].txq_active = true;
Yuval Mintz41086462016-06-05 13:11:13 +03001904 }
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001905
Yuval Mintz41086462016-06-05 13:11:13 +03001906out:
Yuval Mintz5040acf2016-06-05 13:11:14 +03001907 qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001908}
1909
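/* Close a contiguous range of the VF's Rx queues: a stop ramrod is sent only
 * for queues that are currently active, and each queue in the range is then
 * marked inactive.
 */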
1910static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
1911 struct qed_vf_info *vf,
1912 u16 rxq_id, u8 num_rxqs, bool cqe_completion)
1913{
1914 int rc = 0;
1915 int qid;
1916
1917 if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
1918 return -EINVAL;
1919
1920 for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
1921 if (vf->vf_queues[qid].rxq_active) {
1922 rc = qed_sp_eth_rx_queue_stop(p_hwfn,
1923 vf->vf_queues[qid].
1924 fw_rx_qid, false,
1925 cqe_completion);
1926
1927 if (rc)
1928 return rc;
1929 }
1930 vf->vf_queues[qid].rxq_active = false;
1931 vf->num_active_rxqs--;
1932 }
1933
1934 return rc;
1935}
1936
1937static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
1938 struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
1939{
1940 int rc = 0;
1941 int qid;
1942
1943 if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
1944 return -EINVAL;
1945
1946 for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
1947 if (vf->vf_queues[qid].txq_active) {
1948 rc = qed_sp_eth_tx_queue_stop(p_hwfn,
1949 vf->vf_queues[qid].
1950 fw_tx_qid);
1951
1952 if (rc)
1953 return rc;
1954 }
1955 vf->vf_queues[qid].txq_active = false;
1956 }
1957 return rc;
1958}
1959
1960static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
1961 struct qed_ptt *p_ptt,
1962 struct qed_vf_info *vf)
1963{
1964 u16 length = sizeof(struct pfvf_def_resp_tlv);
1965 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1966 u8 status = PFVF_STATUS_SUCCESS;
1967 struct vfpf_stop_rxqs_tlv *req;
1968 int rc;
1969
 1970	/* We allow starting from a qid != 0; in this case we need to make
 1971	 * sure that qid + num_qs doesn't exceed the actual number of queues
 1972	 * that exist.
1973 */
1974 req = &mbx->req_virt->stop_rxqs;
1975 rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
1976 req->num_rxqs, req->cqe_completion);
1977 if (rc)
1978 status = PFVF_STATUS_FAILURE;
1979
1980 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
1981 length, status);
1982}
1983
1984static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
1985 struct qed_ptt *p_ptt,
1986 struct qed_vf_info *vf)
1987{
1988 u16 length = sizeof(struct pfvf_def_resp_tlv);
1989 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1990 u8 status = PFVF_STATUS_SUCCESS;
1991 struct vfpf_stop_txqs_tlv *req;
1992 int rc;
1993
 1994	/* We allow starting from a qid != 0; in this case we need to make
 1995	 * sure that qid + num_qs doesn't exceed the actual number of queues
 1996	 * that exist.
1997 */
1998 req = &mbx->req_virt->stop_txqs;
1999 rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
2000 if (rc)
2001 status = PFVF_STATUS_FAILURE;
2002
2003 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2004 length, status);
2005}
2006
Yuval Mintz17b235c2016-05-11 16:36:18 +03002007static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
2008 struct qed_ptt *p_ptt,
2009 struct qed_vf_info *vf)
2010{
2011 u16 length = sizeof(struct pfvf_def_resp_tlv);
2012 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2013 struct vfpf_update_rxq_tlv *req;
2014 u8 status = PFVF_STATUS_SUCCESS;
2015 u8 complete_event_flg;
2016 u8 complete_cqe_flg;
2017 u16 qid;
2018 int rc;
2019 u8 i;
2020
2021 req = &mbx->req_virt->update_rxq;
2022 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2023 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2024
2025 for (i = 0; i < req->num_rxqs; i++) {
2026 qid = req->rx_qid + i;
2027
2028 if (!vf->vf_queues[qid].rxq_active) {
 2029			DP_NOTICE(p_hwfn, "VF rx_qid = %d isn't active!\n",
2030 qid);
2031 status = PFVF_STATUS_FAILURE;
2032 break;
2033 }
2034
2035 rc = qed_sp_eth_rx_queues_update(p_hwfn,
2036 vf->vf_queues[qid].fw_rx_qid,
2037 1,
2038 complete_cqe_flg,
2039 complete_event_flg,
2040 QED_SPQ_MODE_EBLOCK, NULL);
2041
2042 if (rc) {
2043 status = PFVF_STATUS_FAILURE;
2044 break;
2045 }
2046 }
2047
2048 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2049 length, status);
2050}
2051
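/* Walk the TLV chain in the VF's request buffer until CHANNEL_TLV_LIST_END,
 * returning the first TLV matching req_type; zero-length entries or a chain
 * overrunning TLV_BUFFER_SIZE abort the search.
 */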
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002052void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
2053 void *p_tlvs_list, u16 req_type)
2054{
2055 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2056 int len = 0;
2057
2058 do {
2059 if (!p_tlv->length) {
2060 DP_NOTICE(p_hwfn, "Zero length TLV found\n");
2061 return NULL;
2062 }
2063
2064 if (p_tlv->type == req_type) {
2065 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2066 "Extended tlv type %d, length %d found\n",
2067 p_tlv->type, p_tlv->length);
2068 return p_tlv;
2069 }
2070
2071 len += p_tlv->length;
2072 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2073
2074 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
 2075			DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
2076 return NULL;
2077 }
2078 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
2079
2080 return NULL;
2081}
2082
2083static void
2084qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
2085 struct qed_sp_vport_update_params *p_data,
2086 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2087{
2088 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2089 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2090
2091 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2092 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2093 if (!p_act_tlv)
2094 return;
2095
2096 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2097 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2098 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2099 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2100 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
2101}
2102
2103static void
Yuval Mintz17b235c2016-05-11 16:36:18 +03002104qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
2105 struct qed_sp_vport_update_params *p_data,
2106 struct qed_vf_info *p_vf,
2107 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2108{
2109 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2110 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2111
2112 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2113 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2114 if (!p_vlan_tlv)
2115 return;
2116
Yuval Mintz08feecd2016-05-11 16:36:20 +03002117 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2118
2119 /* Ignore the VF request if we're forcing a vlan */
Yuval Mintz1a635e42016-08-15 10:42:43 +03002120 if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
Yuval Mintz08feecd2016-05-11 16:36:20 +03002121 p_data->update_inner_vlan_removal_flg = 1;
2122 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2123 }
Yuval Mintz17b235c2016-05-11 16:36:18 +03002124
2125 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
2126}
2127
2128static void
2129qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
2130 struct qed_sp_vport_update_params *p_data,
2131 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2132{
2133 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2134 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2135
2136 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2137 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2138 tlv);
2139 if (!p_tx_switch_tlv)
2140 return;
2141
2142 p_data->update_tx_switching_flg = 1;
2143 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2144 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
2145}
2146
2147static void
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002148qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2149 struct qed_sp_vport_update_params *p_data,
2150 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2151{
2152 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2153 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2154
2155 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2156 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2157 if (!p_mcast_tlv)
2158 return;
2159
2160 p_data->update_approx_mcast_flg = 1;
2161 memcpy(p_data->bins, p_mcast_tlv->bins,
2162 sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2163 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2164}
2165
2166static void
2167qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
2168 struct qed_sp_vport_update_params *p_data,
2169 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2170{
2171 struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
2172 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2173 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2174
2175 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2176 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2177 if (!p_accept_tlv)
2178 return;
2179
2180 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2181 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2182 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2183 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2184 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
2185}
2186
2187static void
Yuval Mintz17b235c2016-05-11 16:36:18 +03002188qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
2189 struct qed_sp_vport_update_params *p_data,
2190 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2191{
2192 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2193 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2194
2195 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2196 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2197 tlv);
2198 if (!p_accept_any_vlan)
2199 return;
2200
2201 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2202 p_data->update_accept_any_vlan_flg =
2203 p_accept_any_vlan->update_accept_any_vlan_flg;
2204 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2205}
2206
2207static void
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002208qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
2209 struct qed_vf_info *vf,
2210 struct qed_sp_vport_update_params *p_data,
2211 struct qed_rss_params *p_rss,
2212 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2213{
2214 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2215 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2216 u16 i, q_idx, max_q_idx;
2217 u16 table_size;
2218
2219 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2220 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2221 if (!p_rss_tlv) {
2222 p_data->rss_params = NULL;
2223 return;
2224 }
2225
2226 memset(p_rss, 0, sizeof(struct qed_rss_params));
2227
2228 p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
2229 VFPF_UPDATE_RSS_CONFIG_FLAG);
2230 p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
2231 VFPF_UPDATE_RSS_CAPS_FLAG);
2232 p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
2233 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2234 p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
2235 VFPF_UPDATE_RSS_KEY_FLAG);
2236
2237 p_rss->rss_enable = p_rss_tlv->rss_enable;
2238 p_rss->rss_eng_id = vf->relative_vf_id + 1;
2239 p_rss->rss_caps = p_rss_tlv->rss_caps;
2240 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2241 memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
2242 sizeof(p_rss->rss_ind_table));
2243 memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
2244
2245 table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
2246 (1 << p_rss_tlv->rss_table_size_log));
2247
2248 max_q_idx = ARRAY_SIZE(vf->vf_queues);
2249
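	/* Translate VF-relative indirection-table entries into FW rx queue ids;
	 * out-of-range or inactive entries fall back to the VF's first queue.
	 */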
2250 for (i = 0; i < table_size; i++) {
2251 u16 index = vf->vf_queues[0].fw_rx_qid;
2252
2253 q_idx = p_rss->rss_ind_table[i];
2254 if (q_idx >= max_q_idx)
2255 DP_NOTICE(p_hwfn,
2256 "rss_ind_table[%d] = %d, rxq is out of range\n",
2257 i, q_idx);
2258 else if (!vf->vf_queues[q_idx].rxq_active)
2259 DP_NOTICE(p_hwfn,
2260 "rss_ind_table[%d] = %d, rxq is not active\n",
2261 i, q_idx);
2262 else
2263 index = vf->vf_queues[q_idx].fw_rx_qid;
2264 p_rss->rss_ind_table[i] = index;
2265 }
2266
2267 p_data->rss_params = p_rss;
2268 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
2269}
2270
Yuval Mintz17b235c2016-05-11 16:36:18 +03002271static void
2272qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
2273 struct qed_vf_info *vf,
2274 struct qed_sp_vport_update_params *p_data,
2275 struct qed_sge_tpa_params *p_sge_tpa,
2276 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2277{
2278 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2279 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2280
2281 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2282 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2283
2284 if (!p_sge_tpa_tlv) {
2285 p_data->sge_tpa_params = NULL;
2286 return;
2287 }
2288
2289 memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
2290
2291 p_sge_tpa->update_tpa_en_flg =
2292 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2293 p_sge_tpa->update_tpa_param_flg =
2294 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2295 VFPF_UPDATE_TPA_PARAM_FLAG);
2296
2297 p_sge_tpa->tpa_ipv4_en_flg =
2298 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2299 p_sge_tpa->tpa_ipv6_en_flg =
2300 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2301 p_sge_tpa->tpa_pkt_split_flg =
2302 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2303 p_sge_tpa->tpa_hdr_data_split_flg =
2304 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2305 p_sge_tpa->tpa_gro_consistent_flg =
2306 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2307
2308 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2309 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2310 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2311 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2312 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2313
2314 p_data->sge_tpa_params = p_sge_tpa;
2315
2316 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
2317}
2318
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002319static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
2320 struct qed_ptt *p_ptt,
2321 struct qed_vf_info *vf)
2322{
2323 struct qed_sp_vport_update_params params;
2324 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
Yuval Mintz17b235c2016-05-11 16:36:18 +03002325 struct qed_sge_tpa_params sge_tpa_params;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002326 struct qed_rss_params rss_params;
2327 u8 status = PFVF_STATUS_SUCCESS;
2328 u16 tlvs_mask = 0;
2329 u16 length;
2330 int rc;
2331
Yuval Mintz41086462016-06-05 13:11:13 +03002332	/* Validate that the VF has an active vport before handling the request */
2333 if (!vf->vport_instance) {
2334 DP_VERBOSE(p_hwfn,
2335 QED_MSG_IOV,
2336 "No VPORT instance available for VF[%d], failing vport update\n",
2337 vf->abs_vf_id);
2338 status = PFVF_STATUS_FAILURE;
2339 goto out;
2340 }
2341
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002342 memset(&params, 0, sizeof(params));
2343 params.opaque_fid = vf->opaque_fid;
2344 params.vport_id = vf->vport_id;
2345 params.rss_params = NULL;
2346
2347 /* Search for extended tlvs list and update values
2348 * from VF in struct qed_sp_vport_update_params.
2349 */
2350 qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
Yuval Mintz17b235c2016-05-11 16:36:18 +03002351 qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
2352 qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002353 qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
2354 qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
2355 qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
2356 mbx, &tlvs_mask);
Yuval Mintz17b235c2016-05-11 16:36:18 +03002357 qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
2358 qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
2359 &sge_tpa_params, mbx, &tlvs_mask);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002360
2361 /* Just log a message if there is no single extended tlv in buffer.
2362 * When all features of vport update ramrod would be requested by VF
2363 * as extended TLVs in buffer then an error can be returned in response
2364 * if there is no extended TLV present in buffer.
2365 */
2366 if (!tlvs_mask) {
2367 DP_NOTICE(p_hwfn,
2368 "No feature tlvs found for vport update\n");
2369 status = PFVF_STATUS_NOT_SUPPORTED;
2370 goto out;
2371 }
2372
2373 rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
2374
2375 if (rc)
2376 status = PFVF_STATUS_FAILURE;
2377
2378out:
2379 length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
2380 tlvs_mask, tlvs_mask);
2381 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2382}
2383
Yuval Mintz8246d0b2016-06-05 13:11:15 +03002384static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
2385 struct qed_vf_info *p_vf,
2386 struct qed_filter_ucast *p_params)
Yuval Mintz08feecd2016-05-11 16:36:20 +03002387{
2388 int i;
2389
Yuval Mintz08feecd2016-05-11 16:36:20 +03002390 /* First remove entries and then add new ones */
2391 if (p_params->opcode == QED_FILTER_REMOVE) {
2392 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2393 if (p_vf->shadow_config.vlans[i].used &&
2394 p_vf->shadow_config.vlans[i].vid ==
2395 p_params->vlan) {
2396 p_vf->shadow_config.vlans[i].used = false;
2397 break;
2398 }
2399 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
2400 DP_VERBOSE(p_hwfn,
2401 QED_MSG_IOV,
2402 "VF [%d] - Tries to remove a non-existing vlan\n",
2403 p_vf->relative_vf_id);
2404 return -EINVAL;
2405 }
2406 } else if (p_params->opcode == QED_FILTER_REPLACE ||
2407 p_params->opcode == QED_FILTER_FLUSH) {
2408 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2409 p_vf->shadow_config.vlans[i].used = false;
2410 }
2411
2412 /* In forced mode, we're willing to remove entries - but we don't add
2413 * new ones.
2414 */
Yuval Mintz1a635e42016-08-15 10:42:43 +03002415 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
Yuval Mintz08feecd2016-05-11 16:36:20 +03002416 return 0;
2417
2418 if (p_params->opcode == QED_FILTER_ADD ||
2419 p_params->opcode == QED_FILTER_REPLACE) {
2420 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
2421 if (p_vf->shadow_config.vlans[i].used)
2422 continue;
2423
2424 p_vf->shadow_config.vlans[i].used = true;
2425 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
2426 break;
2427 }
2428
2429 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
2430 DP_VERBOSE(p_hwfn,
2431 QED_MSG_IOV,
2432 "VF [%d] - Tries to configure more than %d vlan filters\n",
2433 p_vf->relative_vf_id,
2434 QED_ETH_VF_NUM_VLAN_FILTERS + 1);
2435 return -EINVAL;
2436 }
2437 }
2438
2439 return 0;
2440}
2441
Yuval Mintz8246d0b2016-06-05 13:11:15 +03002442static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
2443 struct qed_vf_info *p_vf,
2444 struct qed_filter_ucast *p_params)
2445{
2446 int i;
2447
2448 /* If we're in forced-mode, we don't allow any change */
Yuval Mintz1a635e42016-08-15 10:42:43 +03002449 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
Yuval Mintz8246d0b2016-06-05 13:11:15 +03002450 return 0;
2451
2452 /* First remove entries and then add new ones */
2453 if (p_params->opcode == QED_FILTER_REMOVE) {
2454 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
2455 if (ether_addr_equal(p_vf->shadow_config.macs[i],
2456 p_params->mac)) {
2457 memset(p_vf->shadow_config.macs[i], 0,
2458 ETH_ALEN);
2459 break;
2460 }
2461 }
2462
2463 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
2464 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2465 "MAC isn't configured\n");
2466 return -EINVAL;
2467 }
2468 } else if (p_params->opcode == QED_FILTER_REPLACE ||
2469 p_params->opcode == QED_FILTER_FLUSH) {
2470 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
2471 memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN);
2472 }
2473
2474 /* List the new MAC address */
2475 if (p_params->opcode != QED_FILTER_ADD &&
2476 p_params->opcode != QED_FILTER_REPLACE)
2477 return 0;
2478
2479 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
2480 if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
2481 ether_addr_copy(p_vf->shadow_config.macs[i],
2482 p_params->mac);
2483 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2484 "Added MAC at %d entry in shadow\n", i);
2485 break;
2486 }
2487 }
2488
2489 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
2490 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
2491 return -EINVAL;
2492 }
2493
2494 return 0;
2495}
2496
2497static int
2498qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
2499 struct qed_vf_info *p_vf,
2500 struct qed_filter_ucast *p_params)
2501{
2502 int rc = 0;
2503
2504 if (p_params->type == QED_FILTER_MAC) {
2505 rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
2506 if (rc)
2507 return rc;
2508 }
2509
2510 if (p_params->type == QED_FILTER_VLAN)
2511 rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
2512
2513 return rc;
2514}
2515
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002516int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
2517 int vfid, struct qed_filter_ucast *params)
2518{
2519 struct qed_public_vf_info *vf;
2520
2521 vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
2522 if (!vf)
2523 return -EINVAL;
2524
2525 /* No real decision to make; Store the configured MAC */
2526 if (params->type == QED_FILTER_MAC ||
2527 params->type == QED_FILTER_MAC_VLAN)
2528 ether_addr_copy(vf->mac, params->mac);
2529
2530 return 0;
2531}
2532
2533static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
2534 struct qed_ptt *p_ptt,
2535 struct qed_vf_info *vf)
2536{
Yuval Mintz08feecd2016-05-11 16:36:20 +03002537 struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002538 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2539 struct vfpf_ucast_filter_tlv *req;
2540 u8 status = PFVF_STATUS_SUCCESS;
2541 struct qed_filter_ucast params;
2542 int rc;
2543
2544 /* Prepare the unicast filter params */
2545 memset(&params, 0, sizeof(struct qed_filter_ucast));
2546 req = &mbx->req_virt->ucast_filter;
2547 params.opcode = (enum qed_filter_opcode)req->opcode;
2548 params.type = (enum qed_filter_ucast_type)req->type;
2549
2550 params.is_rx_filter = 1;
2551 params.is_tx_filter = 1;
2552 params.vport_to_remove_from = vf->vport_id;
2553 params.vport_to_add_to = vf->vport_id;
2554 memcpy(params.mac, req->mac, ETH_ALEN);
2555 params.vlan = req->vlan;
2556
2557 DP_VERBOSE(p_hwfn,
2558 QED_MSG_IOV,
2559 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
2560 vf->abs_vf_id, params.opcode, params.type,
2561 params.is_rx_filter ? "RX" : "",
2562 params.is_tx_filter ? "TX" : "",
2563 params.vport_to_add_to,
2564 params.mac[0], params.mac[1],
2565 params.mac[2], params.mac[3],
2566 params.mac[4], params.mac[5], params.vlan);
2567
2568 if (!vf->vport_instance) {
2569 DP_VERBOSE(p_hwfn,
2570 QED_MSG_IOV,
2571 "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
2572 vf->abs_vf_id);
2573 status = PFVF_STATUS_FAILURE;
2574 goto out;
2575 }
2576
Yuval Mintz08feecd2016-05-11 16:36:20 +03002577 /* Update shadow copy of the VF configuration */
2578 if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
2579 status = PFVF_STATUS_FAILURE;
2580 goto out;
2581 }
2582
 2583	/* Determine if the unicast filtering is acceptable to the PF */
Yuval Mintz1a635e42016-08-15 10:42:43 +03002584 if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
Yuval Mintz08feecd2016-05-11 16:36:20 +03002585 (params.type == QED_FILTER_VLAN ||
2586 params.type == QED_FILTER_MAC_VLAN)) {
2587 /* Once VLAN is forced or PVID is set, do not allow
2588 * to add/replace any further VLANs.
2589 */
2590 if (params.opcode == QED_FILTER_ADD ||
2591 params.opcode == QED_FILTER_REPLACE)
2592 status = PFVF_STATUS_FORCED;
2593 goto out;
2594 }
2595
Yuval Mintz1a635e42016-08-15 10:42:43 +03002596 if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
Yuval Mintzeff16962016-05-11 16:36:21 +03002597 (params.type == QED_FILTER_MAC ||
2598 params.type == QED_FILTER_MAC_VLAN)) {
2599 if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
2600 (params.opcode != QED_FILTER_ADD &&
2601 params.opcode != QED_FILTER_REPLACE))
2602 status = PFVF_STATUS_FORCED;
2603 goto out;
2604 }
2605
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002606 rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
2607 if (rc) {
2608 status = PFVF_STATUS_FAILURE;
2609 goto out;
2610 }
2611
2612 rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
2613 QED_SPQ_MODE_CB, NULL);
2614 if (rc)
2615 status = PFVF_STATUS_FAILURE;
2616
2617out:
2618 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
2619 sizeof(struct pfvf_def_resp_tlv), status);
2620}
2621
Yuval Mintz0b55e272016-05-11 16:36:15 +03002622static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
2623 struct qed_ptt *p_ptt,
2624 struct qed_vf_info *vf)
2625{
2626 int i;
2627
2628 /* Reset the SBs */
2629 for (i = 0; i < vf->num_sbs; i++)
2630 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
2631 vf->igu_sbs[i],
2632 vf->opaque_fid, false);
2633
2634 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
2635 sizeof(struct pfvf_def_resp_tlv),
2636 PFVF_STATUS_SUCCESS);
2637}
2638
2639static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
2640 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
2641{
2642 u16 length = sizeof(struct pfvf_def_resp_tlv);
2643 u8 status = PFVF_STATUS_SUCCESS;
2644
2645 /* Disable Interrupts for VF */
2646 qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
2647
2648 /* Reset Permission table */
2649 qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
2650
2651 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
2652 length, status);
2653}
2654
2655static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
2656 struct qed_ptt *p_ptt,
2657 struct qed_vf_info *p_vf)
2658{
2659 u16 length = sizeof(struct pfvf_def_resp_tlv);
Yuval Mintz1fe614d2016-06-05 13:11:11 +03002660 u8 status = PFVF_STATUS_SUCCESS;
2661 int rc = 0;
Yuval Mintz0b55e272016-05-11 16:36:15 +03002662
2663 qed_iov_vf_cleanup(p_hwfn, p_vf);
2664
Yuval Mintz1fe614d2016-06-05 13:11:11 +03002665 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
2666 /* Stopping the VF */
2667 rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
2668 p_vf->opaque_fid);
2669
2670 if (rc) {
2671 DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
2672 rc);
2673 status = PFVF_STATUS_FAILURE;
2674 }
2675
2676 p_vf->state = VF_STOPPED;
2677 }
2678
Yuval Mintz0b55e272016-05-11 16:36:15 +03002679 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
Yuval Mintz1fe614d2016-06-05 13:11:11 +03002680 length, status);
Yuval Mintz0b55e272016-05-11 16:36:15 +03002681}
2682
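/* Pretend to the VF's concrete FID and poll DORQ_REG_VF_USAGE_CNT until the
 * doorbell queue usage drains, allowing up to ~1 second (50 x 20ms).
 */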
2683static int
2684qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
2685 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
2686{
2687 int cnt;
2688 u32 val;
2689
2690 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
2691
2692 for (cnt = 0; cnt < 50; cnt++) {
2693 val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
2694 if (!val)
2695 break;
2696 msleep(20);
2697 }
2698 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
2699
2700 if (cnt == 50) {
2701 DP_ERR(p_hwfn,
2702 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
2703 p_vf->abs_vf_id, val);
2704 return -EBUSY;
2705 }
2706
2707 return 0;
2708}
2709
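/* Sample the per-VOQ PBF producer/consumer counters, then poll until every
 * consumer has advanced by at least the initial producer-consumer distance,
 * i.e. until all blocks the VF had in flight have drained.
 */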
2710static int
2711qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
2712 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
2713{
2714 u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
2715 int i, cnt;
2716
2717 /* Read initial consumers & producers */
2718 for (i = 0; i < MAX_NUM_VOQS; i++) {
2719 u32 prod;
2720
2721 cons[i] = qed_rd(p_hwfn, p_ptt,
2722 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
2723 i * 0x40);
2724 prod = qed_rd(p_hwfn, p_ptt,
2725 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
2726 i * 0x40);
2727 distance[i] = prod - cons[i];
2728 }
2729
2730 /* Wait for consumers to pass the producers */
2731 i = 0;
2732 for (cnt = 0; cnt < 50; cnt++) {
2733 for (; i < MAX_NUM_VOQS; i++) {
2734 u32 tmp;
2735
2736 tmp = qed_rd(p_hwfn, p_ptt,
2737 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
2738 i * 0x40);
2739 if (distance[i] > tmp - cons[i])
2740 break;
2741 }
2742
2743 if (i == MAX_NUM_VOQS)
2744 break;
2745
2746 msleep(20);
2747 }
2748
2749 if (cnt == 50) {
2750 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
2751 p_vf->abs_vf_id, i);
2752 return -EBUSY;
2753 }
2754
2755 return 0;
2756}
2757
2758static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
2759 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
2760{
2761 int rc;
2762
2763 rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
2764 if (rc)
2765 return rc;
2766
2767 rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
2768 if (rc)
2769 return rc;
2770
2771 return 0;
2772}
2773
2774static int
2775qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
2776 struct qed_ptt *p_ptt,
2777 u16 rel_vf_id, u32 *ack_vfs)
2778{
2779 struct qed_vf_info *p_vf;
2780 int rc = 0;
2781
2782 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
2783 if (!p_vf)
2784 return 0;
2785
2786 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
2787 (1ULL << (rel_vf_id % 64))) {
2788 u16 vfid = p_vf->abs_vf_id;
2789
2790 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2791 "VF[%d] - Handling FLR\n", vfid);
2792
2793 qed_iov_vf_cleanup(p_hwfn, p_vf);
2794
2795 /* If VF isn't active, no need for anything but SW */
2796 if (!p_vf->b_init)
2797 goto cleanup;
2798
2799 rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
2800 if (rc)
2801 goto cleanup;
2802
2803 rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
2804 if (rc) {
 2805			DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
2806 return rc;
2807 }
2808
2809 /* VF_STOPPED has to be set only after final cleanup
2810 * but prior to re-enabling the VF.
2811 */
2812 p_vf->state = VF_STOPPED;
2813
2814 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
2815 if (rc) {
 2816			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
2817 vfid);
2818 return rc;
2819 }
2820cleanup:
2821 /* Mark VF for ack and clean pending state */
2822 if (p_vf->state == VF_RESET)
2823 p_vf->state = VF_STOPPED;
Yuval Mintz1a635e42016-08-15 10:42:43 +03002824 ack_vfs[vfid / 32] |= BIT((vfid % 32));
Yuval Mintz0b55e272016-05-11 16:36:15 +03002825 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
2826 ~(1ULL << (rel_vf_id % 64));
2827 p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
2828 ~(1ULL << (rel_vf_id % 64));
2829 }
2830
2831 return rc;
2832}
2833
2834int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2835{
2836 u32 ack_vfs[VF_MAX_STATIC / 32];
2837 int rc = 0;
2838 u16 i;
2839
2840 memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
2841
2842 /* Since BRB <-> PRS interface can't be tested as part of the flr
2843 * polling due to HW limitations, simply sleep a bit. And since
2844 * there's no need to wait per-vf, do it before looping.
2845 */
2846 msleep(100);
2847
2848 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
2849 qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
2850
2851 rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
2852 return rc;
2853}
2854
2855int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
2856{
2857 u16 i, found = 0;
2858
2859 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
2860 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
2861 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2862 "[%08x,...,%08x]: %08x\n",
2863 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
2864
2865 if (!p_hwfn->cdev->p_iov_info) {
2866 DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
2867 return 0;
2868 }
2869
2870 /* Mark VFs */
2871 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
2872 struct qed_vf_info *p_vf;
2873 u8 vfid;
2874
2875 p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
2876 if (!p_vf)
2877 continue;
2878
2879 vfid = p_vf->abs_vf_id;
Yuval Mintz1a635e42016-08-15 10:42:43 +03002880 if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
Yuval Mintz0b55e272016-05-11 16:36:15 +03002881 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
2882 u16 rel_vf_id = p_vf->relative_vf_id;
2883
2884 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2885 "VF[%d] [rel %d] got FLR-ed\n",
2886 vfid, rel_vf_id);
2887
2888 p_vf->state = VF_RESET;
2889
2890 /* No need to lock here, since pending_flr should
 2891			 * only change here and before ACKing the MFW. Since
 2892			 * the MFW will not trigger an additional attention for
 2893			 * VF FLR until we ACK, we're safe.
2894 */
2895 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
2896 found = 1;
2897 }
2898 }
2899
2900 return found;
2901}
2902
Yuval Mintz73390ac2016-05-11 16:36:24 +03002903static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
2904 u16 vfid,
2905 struct qed_mcp_link_params *p_params,
2906 struct qed_mcp_link_state *p_link,
2907 struct qed_mcp_link_capabilities *p_caps)
2908{
2909 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
2910 vfid,
2911 false);
2912 struct qed_bulletin_content *p_bulletin;
2913
2914 if (!p_vf)
2915 return;
2916
2917 p_bulletin = p_vf->bulletin.p_virt;
2918
2919 if (p_params)
2920 __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
2921 if (p_link)
2922 __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
2923 if (p_caps)
2924 __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
2925}
2926
Yuval Mintz37bff2b2016-05-11 16:36:13 +03002927static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
2928 struct qed_ptt *p_ptt, int vfid)
2929{
2930 struct qed_iov_vf_mbx *mbx;
2931 struct qed_vf_info *p_vf;
Yuval Mintz37bff2b2016-05-11 16:36:13 +03002932
2933 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
2934 if (!p_vf)
2935 return;
2936
2937 mbx = &p_vf->vf_mbx;
2938
2939 /* qed_iov_process_mbx_request */
Yuval Mintz54fdd802016-06-05 13:11:16 +03002940 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2941 "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03002942
2943 mbx->first_tlv = mbx->req_virt->first_tlv;
2944
2945 /* check if tlv type is known */
2946 if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03002947 switch (mbx->first_tlv.tl.type) {
2948 case CHANNEL_TLV_ACQUIRE:
2949 qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
2950 break;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002951 case CHANNEL_TLV_VPORT_START:
2952 qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
2953 break;
2954 case CHANNEL_TLV_VPORT_TEARDOWN:
2955 qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
2956 break;
2957 case CHANNEL_TLV_START_RXQ:
2958 qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
2959 break;
2960 case CHANNEL_TLV_START_TXQ:
2961 qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
2962 break;
2963 case CHANNEL_TLV_STOP_RXQS:
2964 qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
2965 break;
2966 case CHANNEL_TLV_STOP_TXQS:
2967 qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
2968 break;
Yuval Mintz17b235c2016-05-11 16:36:18 +03002969 case CHANNEL_TLV_UPDATE_RXQ:
2970 qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
2971 break;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002972 case CHANNEL_TLV_VPORT_UPDATE:
2973 qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
2974 break;
2975 case CHANNEL_TLV_UCAST_FILTER:
2976 qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
2977 break;
Yuval Mintz0b55e272016-05-11 16:36:15 +03002978 case CHANNEL_TLV_CLOSE:
2979 qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
2980 break;
2981 case CHANNEL_TLV_INT_CLEANUP:
2982 qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
2983 break;
2984 case CHANNEL_TLV_RELEASE:
2985 qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
2986 break;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03002987 }
Yuval Mintz37bff2b2016-05-11 16:36:13 +03002988 } else {
2989 /* unknown TLV - this may belong to a VF driver from the future
2990 * - a version written after this PF driver was written, which
2991 * supports features unknown as of yet. Too bad since we don't
2992 * support them. Or this may be because someone wrote a crappy
2993 * VF driver and is sending garbage over the channel.
2994 */
Yuval Mintz54fdd802016-06-05 13:11:16 +03002995 DP_NOTICE(p_hwfn,
2996 "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
2997 p_vf->abs_vf_id,
2998 mbx->first_tlv.tl.type,
2999 mbx->first_tlv.tl.length,
3000 mbx->first_tlv.padding, mbx->first_tlv.reply_address);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003001
Yuval Mintz54fdd802016-06-05 13:11:16 +03003002 /* Try replying in case reply address matches the acquisition's
3003 * posted address.
3004 */
3005 if (p_vf->acquire.first_tlv.reply_address &&
3006 (mbx->first_tlv.reply_address ==
3007 p_vf->acquire.first_tlv.reply_address)) {
3008 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3009 mbx->first_tlv.tl.type,
3010 sizeof(struct pfvf_def_resp_tlv),
3011 PFVF_STATUS_NOT_SUPPORTED);
3012 } else {
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003013 DP_VERBOSE(p_hwfn,
3014 QED_MSG_IOV,
Yuval Mintz54fdd802016-06-05 13:11:16 +03003015 "VF[%02x]: Can't respond to TLV - no valid reply address\n",
3016 p_vf->abs_vf_id);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003017 }
3018 }
3019}
3020
3021void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
3022{
3023 u64 add_bit = 1ULL << (vfid % 64);
3024
3025 p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
3026}
3027
3028static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
3029 u64 *events)
3030{
3031 u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
3032
3033 memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
3034 memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
3035}
3036
3037static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
3038 u16 abs_vfid, struct regpair *vf_msg)
3039{
3040 u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
3041 struct qed_vf_info *p_vf;
3042
3043 if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
3044 DP_VERBOSE(p_hwfn,
3045 QED_MSG_IOV,
3046 "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
3047 abs_vfid);
3048 return 0;
3049 }
3050 p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
3051
 3052	/* Record the physical address of the request so that the handler
 3053	 * can later copy the message from it.
3054 */
3055 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
3056
3057 /* Mark the event and schedule the workqueue */
3058 qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
3059 qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
3060
3061 return 0;
3062}
3063
3064int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
3065 u8 opcode, __le16 echo, union event_ring_data *data)
3066{
3067 switch (opcode) {
3068 case COMMON_EVENT_VF_PF_CHANNEL:
3069 return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
3070 &data->vf_pf_channel.msg_addr);
3071 default:
3072 DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
3073 opcode);
3074 return -EINVAL;
3075 }
3076}
3077
Yuval Mintz32a47e72016-05-11 16:36:12 +03003078u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
3079{
3080 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
3081 u16 i;
3082
3083 if (!p_iov)
3084 goto out;
3085
3086 for (i = rel_vf_id; i < p_iov->total_vfs; i++)
 3087		if (qed_iov_is_valid_vfid(p_hwfn, i, true))
3088 return i;
3089
3090out:
3091 return MAX_NUM_VFS;
3092}
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003093
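/* DMA the VF's posted mailbox request from VF memory into the PF-side
 * request buffer (req_phys) so the PF can parse it locally.
 */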
3094static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
3095 int vfid)
3096{
3097 struct qed_dmae_params params;
3098 struct qed_vf_info *vf_info;
3099
3100 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3101 if (!vf_info)
3102 return -EINVAL;
3103
3104 memset(&params, 0, sizeof(struct qed_dmae_params));
3105 params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
3106 params.src_vfid = vf_info->abs_vf_id;
3107
3108 if (qed_dmae_host2host(p_hwfn, ptt,
3109 vf_info->vf_mbx.pending_req,
3110 vf_info->vf_mbx.req_phys,
3111 sizeof(union vfpf_tlvs) / 4, &params)) {
3112 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3113 "Failed to copy message from VF 0x%02x\n", vfid);
3114
3115 return -EIO;
3116 }
3117
3118 return 0;
3119}
3120
Yuval Mintzeff16962016-05-11 16:36:21 +03003121static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
3122 u8 *mac, int vfid)
3123{
3124 struct qed_vf_info *vf_info;
3125 u64 feature;
3126
3127 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3128 if (!vf_info) {
3129 DP_NOTICE(p_hwfn->cdev,
3130 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
3131 return;
3132 }
3133
3134 feature = 1 << MAC_ADDR_FORCED;
3135 memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
3136
3137 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3138 /* Forced MAC will disable MAC_ADDR */
Yuval Mintz1a635e42016-08-15 10:42:43 +03003139 vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
Yuval Mintzeff16962016-05-11 16:36:21 +03003140
3141 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3142}
3143
Yuval Mintz08feecd2016-05-11 16:36:20 +03003144void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
3145 u16 pvid, int vfid)
3146{
3147 struct qed_vf_info *vf_info;
3148 u64 feature;
3149
3150 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3151 if (!vf_info) {
3152 DP_NOTICE(p_hwfn->cdev,
3153 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
3154 return;
3155 }
3156
3157 feature = 1 << VLAN_ADDR_FORCED;
3158 vf_info->bulletin.p_virt->pvid = pvid;
3159 if (pvid)
3160 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3161 else
3162 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
3163
3164 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3165}
3166
Yuval Mintz6ddc7602016-05-11 16:36:23 +03003167static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
3168{
3169 struct qed_vf_info *p_vf_info;
3170
3171 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3172 if (!p_vf_info)
3173 return false;
3174
3175 return !!p_vf_info->vport_instance;
3176}
3177
Yuval Mintz0b55e272016-05-11 16:36:15 +03003178bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
3179{
3180 struct qed_vf_info *p_vf_info;
3181
3182 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3183 if (!p_vf_info)
3184 return true;
3185
3186 return p_vf_info->state == VF_STOPPED;
3187}
3188
Yuval Mintz73390ac2016-05-11 16:36:24 +03003189static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
3190{
3191 struct qed_vf_info *vf_info;
3192
3193 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3194 if (!vf_info)
3195 return false;
3196
3197 return vf_info->spoof_chk;
3198}
3199
Yuval Mintz6ddc7602016-05-11 16:36:23 +03003200int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
3201{
3202 struct qed_vf_info *vf;
3203 int rc = -EINVAL;
3204
3205 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3206 DP_NOTICE(p_hwfn,
3207 "SR-IOV sanity check failed, can't set spoofchk\n");
3208 goto out;
3209 }
3210
3211 vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3212 if (!vf)
3213 goto out;
3214
3215 if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
3216 /* After VF VPORT start PF will configure spoof check */
3217 vf->req_spoofchk_val = val;
3218 rc = 0;
3219 goto out;
3220 }
3221
3222 rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
3223
3224out:
3225 return rc;
3226}
3227
Yuval Mintzeff16962016-05-11 16:36:21 +03003228static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
3229 u16 rel_vf_id)
3230{
3231 struct qed_vf_info *p_vf;
3232
3233 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3234 if (!p_vf || !p_vf->bulletin.p_virt)
3235 return NULL;
3236
Yuval Mintz1a635e42016-08-15 10:42:43 +03003237 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
Yuval Mintzeff16962016-05-11 16:36:21 +03003238 return NULL;
3239
3240 return p_vf->bulletin.p_virt->mac;
3241}
3242
Yuval Mintz08feecd2016-05-11 16:36:20 +03003243u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
3244{
3245 struct qed_vf_info *p_vf;
3246
3247 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3248 if (!p_vf || !p_vf->bulletin.p_virt)
3249 return 0;
3250
Yuval Mintz1a635e42016-08-15 10:42:43 +03003251 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
Yuval Mintz08feecd2016-05-11 16:36:20 +03003252 return 0;
3253
3254 return p_vf->bulletin.p_virt->pvid;
3255}
3256
Yuval Mintz733def62016-05-11 16:36:22 +03003257static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
3258 struct qed_ptt *p_ptt, int vfid, int val)
3259{
3260 struct qed_vf_info *vf;
3261 u8 abs_vp_id = 0;
3262 int rc;
3263
3264 vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3265 if (!vf)
3266 return -EINVAL;
3267
3268 rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
3269 if (rc)
3270 return rc;
3271
3272 return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
3273}
3274
3275int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
3276{
3277 struct qed_vf_info *vf;
3278 u8 vport_id;
3279 int i;
3280
3281 for_each_hwfn(cdev, i) {
3282 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3283
3284 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3285 DP_NOTICE(p_hwfn,
3286 "SR-IOV sanity check failed, can't set min rate\n");
3287 return -EINVAL;
3288 }
3289 }
3290
3291 vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
3292 vport_id = vf->vport_id;
3293
3294 return qed_configure_vport_wfq(cdev, vport_id, rate);
3295}
3296
Yuval Mintz73390ac2016-05-11 16:36:24 +03003297static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
3298{
3299 struct qed_wfq_data *vf_vp_wfq;
3300 struct qed_vf_info *vf_info;
3301
3302 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3303 if (!vf_info)
3304 return 0;
3305
3306 vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
3307
3308 if (vf_vp_wfq->configured)
3309 return vf_vp_wfq->min_speed;
3310 else
3311 return 0;
3312}
3313
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003314/**
3315 * qed_schedule_iov - schedules IOV task for VF and PF
3316 * @hwfn: hardware function pointer
3317 * @flag: IOV flag for VF/PF
3318 */
3319void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
3320{
3321 smp_mb__before_atomic();
3322 set_bit(flag, &hwfn->iov_task_flags);
3323 smp_mb__after_atomic();
3324 DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
3325 queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
3326}
3327
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03003328void qed_vf_start_iov_wq(struct qed_dev *cdev)
3329{
3330 int i;
3331
3332 for_each_hwfn(cdev, i)
3333 queue_delayed_work(cdev->hwfns[i].iov_wq,
3334 &cdev->hwfns[i].iov_task, 0);
3335}
3336
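/* Tear down SR-IOV - flush the IOV workqueues, mark all VFs as disabled,
 * optionally disable the PCI SR-IOV capability, and wait for each VF's FLR
 * to complete before releasing its HW resources.
 */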
Yuval Mintz0b55e272016-05-11 16:36:15 +03003337int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
3338{
3339 int i, j;
3340
3341 for_each_hwfn(cdev, i)
3342 if (cdev->hwfns[i].iov_wq)
3343 flush_workqueue(cdev->hwfns[i].iov_wq);
3344
3345 /* Mark VFs for disablement */
3346 qed_iov_set_vfs_to_disable(cdev, true);
3347
3348 if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
3349 pci_disable_sriov(cdev->pdev);
3350
3351 for_each_hwfn(cdev, i) {
3352 struct qed_hwfn *hwfn = &cdev->hwfns[i];
3353 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
3354
 3355		/* Failure to acquire the ptt in 100G creates an odd error
 3356		 * where the first engine has already released IOV.
 3357		 */
3358 if (!ptt) {
3359 DP_ERR(hwfn, "Failed to acquire ptt\n");
3360 return -EBUSY;
3361 }
3362
Yuval Mintz733def62016-05-11 16:36:22 +03003363 /* Clean WFQ db and configure equal weight for all vports */
3364 qed_clean_wfq_db(hwfn, ptt);
3365
Yuval Mintz0b55e272016-05-11 16:36:15 +03003366 qed_for_each_vf(hwfn, j) {
3367 int k;
3368
3369 if (!qed_iov_is_valid_vfid(hwfn, j, true))
3370 continue;
3371
3372 /* Wait until VF is disabled before releasing */
3373 for (k = 0; k < 100; k++) {
3374 if (!qed_iov_is_vf_stopped(hwfn, j))
3375 msleep(20);
3376 else
3377 break;
3378 }
3379
3380 if (k < 100)
3381 qed_iov_release_hw_for_vf(&cdev->hwfns[i],
3382 ptt, j);
3383 else
3384 DP_ERR(hwfn,
3385 "Timeout waiting for VF's FLR to end\n");
3386 }
3387
3388 qed_ptt_release(hwfn, ptt);
3389 }
3390
3391 qed_iov_set_vfs_to_disable(cdev, false);
3392
3393 return 0;
3394}
3395
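/* Enable 'num' VFs - prepare HW resources for every VF on each hwfn,
 * splitting the free status blocks between the VFs, and only then enable
 * the PCI SR-IOV capability.
 */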
3396static int qed_sriov_enable(struct qed_dev *cdev, int num)
3397{
3398 struct qed_sb_cnt_info sb_cnt_info;
3399 int i, j, rc;
3400
3401 if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
3402 DP_NOTICE(cdev, "Can start at most %d VFs\n",
3403 RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
3404 return -EINVAL;
3405 }
3406
3407 /* Initialize HW for VF access */
3408 for_each_hwfn(cdev, j) {
3409 struct qed_hwfn *hwfn = &cdev->hwfns[j];
3410 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
3411 int num_sbs = 0, limit = 16;
3412
3413 if (!ptt) {
3414 DP_ERR(hwfn, "Failed to acquire ptt\n");
3415 rc = -EBUSY;
3416 goto err;
3417 }
3418
Yuval Mintz83f34bd2016-05-15 14:48:08 +03003419 if (IS_MF_DEFAULT(hwfn))
3420 limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;
3421
Yuval Mintz0b55e272016-05-11 16:36:15 +03003422 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
3423 qed_int_get_num_sbs(hwfn, &sb_cnt_info);
3424 num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
3425
3426 for (i = 0; i < num; i++) {
3427 if (!qed_iov_is_valid_vfid(hwfn, i, false))
3428 continue;
3429
3430 rc = qed_iov_init_hw_for_vf(hwfn,
3431 ptt, i, num_sbs / num);
3432 if (rc) {
3433 DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
3434 qed_ptt_release(hwfn, ptt);
3435 goto err;
3436 }
3437 }
3438
3439 qed_ptt_release(hwfn, ptt);
3440 }
3441
3442 /* Enable SRIOV PCIe functions */
3443 rc = pci_enable_sriov(cdev->pdev, num);
3444 if (rc) {
3445 DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
3446 goto err;
3447 }
3448
3449 return num;
3450
3451err:
3452 qed_sriov_disable(cdev, false);
3453 return rc;
3454}
3455
3456static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
3457{
3458 if (!IS_QED_SRIOV(cdev)) {
3459 DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
3460 return -EOPNOTSUPP;
3461 }
3462
3463 if (num_vfs_param)
3464 return qed_sriov_enable(cdev, num_vfs_param);
3465 else
3466 return qed_sriov_disable(cdev, true);
3467}
3468
Yuval Mintzeff16962016-05-11 16:36:21 +03003469static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
3470{
3471 int i;
3472
3473 if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
3474 DP_VERBOSE(cdev, QED_MSG_IOV,
3475 "Cannot set a VF MAC; Sriov is not enabled\n");
3476 return -EINVAL;
3477 }
3478
3479 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
3480 DP_VERBOSE(cdev, QED_MSG_IOV,
3481 "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
3482 return -EINVAL;
3483 }
3484
3485 for_each_hwfn(cdev, i) {
3486 struct qed_hwfn *hwfn = &cdev->hwfns[i];
3487 struct qed_public_vf_info *vf_info;
3488
3489 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
3490 if (!vf_info)
3491 continue;
3492
3493 /* Set the forced MAC, and schedule the IOV task */
3494 ether_addr_copy(vf_info->forced_mac, mac);
3495 qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
3496 }
3497
3498 return 0;
3499}
3500
Yuval Mintz08feecd2016-05-11 16:36:20 +03003501static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
3502{
3503 int i;
3504
3505 if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
3506 DP_VERBOSE(cdev, QED_MSG_IOV,
 3507			   "Cannot set a VF VLAN; Sriov is not enabled\n");
3508 return -EINVAL;
3509 }
3510
3511 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
3512 DP_VERBOSE(cdev, QED_MSG_IOV,
 3513			   "Cannot set VF[%d] VLAN (VF is not active)\n", vfid);
3514 return -EINVAL;
3515 }
3516
3517 for_each_hwfn(cdev, i) {
3518 struct qed_hwfn *hwfn = &cdev->hwfns[i];
3519 struct qed_public_vf_info *vf_info;
3520
3521 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
3522 if (!vf_info)
3523 continue;
3524
3525 /* Set the forced vlan, and schedule the IOV task */
3526 vf_info->forced_vlan = vid;
3527 qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
3528 }
3529
3530 return 0;
3531}
3532
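/* Fill an ifla_vf_info structure for a VF - MAC (the forced one when set),
 * forced vlan, spoof-checking, link state and Tx rates.
 */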
Yuval Mintz73390ac2016-05-11 16:36:24 +03003533static int qed_get_vf_config(struct qed_dev *cdev,
3534 int vf_id, struct ifla_vf_info *ivi)
3535{
3536 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
3537 struct qed_public_vf_info *vf_info;
3538 struct qed_mcp_link_state link;
3539 u32 tx_rate;
3540
3541 /* Sanitize request */
3542 if (IS_VF(cdev))
3543 return -EINVAL;
3544
3545 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
3546 DP_VERBOSE(cdev, QED_MSG_IOV,
3547 "VF index [%d] isn't active\n", vf_id);
3548 return -EINVAL;
3549 }
3550
3551 vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
3552
3553 qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
3554
3555 /* Fill information about VF */
3556 ivi->vf = vf_id;
3557
3558 if (is_valid_ether_addr(vf_info->forced_mac))
3559 ether_addr_copy(ivi->mac, vf_info->forced_mac);
3560 else
3561 ether_addr_copy(ivi->mac, vf_info->mac);
3562
3563 ivi->vlan = vf_info->forced_vlan;
3564 ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
3565 ivi->linkstate = vf_info->link_state;
3566 tx_rate = vf_info->tx_rate;
3567 ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
3568 ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
3569
3570 return 0;
3571}
3572
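/* Reflect the PF's current link parameters into the bulletin boards of all
 * possible VFs, overriding link status and speed according to each VF's
 * configured link state and Tx rate, and schedule a bulletin update.
 */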
Yuval Mintz36558c32016-05-11 16:36:17 +03003573void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
3574{
3575 struct qed_mcp_link_capabilities caps;
3576 struct qed_mcp_link_params params;
3577 struct qed_mcp_link_state link;
3578 int i;
3579
3580 if (!hwfn->pf_iov_info)
3581 return;
3582
3583 /* Update bulletin of all future possible VFs with link configuration */
3584 for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
Yuval Mintz733def62016-05-11 16:36:22 +03003585 struct qed_public_vf_info *vf_info;
3586
3587 vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
3588 if (!vf_info)
3589 continue;
3590
Yuval Mintz36558c32016-05-11 16:36:17 +03003591 memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
3592 memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
3593 memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
3594 sizeof(caps));
3595
Yuval Mintz733def62016-05-11 16:36:22 +03003596 /* Modify link according to the VF's configured link state */
3597 switch (vf_info->link_state) {
3598 case IFLA_VF_LINK_STATE_DISABLE:
3599 link.link_up = false;
3600 break;
3601 case IFLA_VF_LINK_STATE_ENABLE:
3602 link.link_up = true;
 3603			/* Set speed according to the maximum supported by HW.
 3604			 * That is 40G for regular devices and 100G for CMT
 3605			 * mode devices.
 3606			 */
 3607			link.speed = (hwfn->cdev->num_hwfns > 1) ?
 3608				     100000 : 40000;
			break;
3609 default:
3610 /* In auto mode pass PF link image to VF */
3611 break;
3612 }
3613
3614 if (link.link_up && vf_info->tx_rate) {
3615 struct qed_ptt *ptt;
3616 int rate;
3617
3618 rate = min_t(int, vf_info->tx_rate, link.speed);
3619
3620 ptt = qed_ptt_acquire(hwfn);
3621 if (!ptt) {
3622 DP_NOTICE(hwfn, "Failed to acquire PTT\n");
3623 return;
3624 }
3625
3626 if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
3627 vf_info->tx_rate = rate;
3628 link.speed = rate;
3629 }
3630
3631 qed_ptt_release(hwfn, ptt);
3632 }
3633
Yuval Mintz36558c32016-05-11 16:36:17 +03003634 qed_iov_set_link(hwfn, i, &params, &link, &caps);
3635 }
3636
3637 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
3638}
3639
Yuval Mintz733def62016-05-11 16:36:22 +03003640static int qed_set_vf_link_state(struct qed_dev *cdev,
3641 int vf_id, int link_state)
3642{
3643 int i;
3644
3645 /* Sanitize request */
3646 if (IS_VF(cdev))
3647 return -EINVAL;
3648
3649 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
3650 DP_VERBOSE(cdev, QED_MSG_IOV,
3651 "VF index [%d] isn't active\n", vf_id);
3652 return -EINVAL;
3653 }
3654
3655 /* Handle configuration of link state */
3656 for_each_hwfn(cdev, i) {
3657 struct qed_hwfn *hwfn = &cdev->hwfns[i];
3658 struct qed_public_vf_info *vf;
3659
3660 vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
3661 if (!vf)
3662 continue;
3663
3664 if (vf->link_state == link_state)
3665 continue;
3666
3667 vf->link_state = link_state;
3668 qed_inform_vf_link_state(&cdev->hwfns[i]);
3669 }
3670
3671 return 0;
3672}
3673
Yuval Mintz6ddc7602016-05-11 16:36:23 +03003674static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
3675{
3676 int i, rc = -EINVAL;
3677
3678 for_each_hwfn(cdev, i) {
3679 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3680
3681 rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
3682 if (rc)
3683 break;
3684 }
3685
3686 return rc;
3687}
3688
Yuval Mintz733def62016-05-11 16:36:22 +03003689static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
3690{
3691 int i;
3692
3693 for_each_hwfn(cdev, i) {
3694 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3695 struct qed_public_vf_info *vf;
3696
3697 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3698 DP_NOTICE(p_hwfn,
3699 "SR-IOV sanity check failed, can't set tx rate\n");
3700 return -EINVAL;
3701 }
3702
3703 vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
3704
3705 vf->tx_rate = rate;
3706
3707 qed_inform_vf_link_state(p_hwfn);
3708 }
3709
3710 return 0;
3711}
3712
3713static int qed_set_vf_rate(struct qed_dev *cdev,
3714 int vfid, u32 min_rate, u32 max_rate)
3715{
3716 int rc_min = 0, rc_max = 0;
3717
3718 if (max_rate)
3719 rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
3720
3721 if (min_rate)
3722 rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
3723
3724 if (rc_max | rc_min)
3725 return -EINVAL;
3726
3727 return 0;
3728}
3729
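/* Handle pending VF->PF mailbox messages - read and clear the pending-events
 * bitmaps, then copy and process the request of every VF with a pending bit.
 */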
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003730static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
3731{
3732 u64 events[QED_VF_ARRAY_LENGTH];
3733 struct qed_ptt *ptt;
3734 int i;
3735
3736 ptt = qed_ptt_acquire(hwfn);
3737 if (!ptt) {
3738 DP_VERBOSE(hwfn, QED_MSG_IOV,
3739 "Can't acquire PTT; re-scheduling\n");
3740 qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
3741 return;
3742 }
3743
3744 qed_iov_pf_get_and_clear_pending_events(hwfn, events);
3745
3746 DP_VERBOSE(hwfn, QED_MSG_IOV,
3747 "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
3748 events[0], events[1], events[2]);
3749
3750 qed_for_each_vf(hwfn, i) {
3751 /* Skip VFs with no pending messages */
3752 if (!(events[i / 64] & (1ULL << (i % 64))))
3753 continue;
3754
3755 DP_VERBOSE(hwfn, QED_MSG_IOV,
3756 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
3757 i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
3758
3759 /* Copy VF's message to PF's request buffer for that VF */
3760 if (qed_iov_copy_vf_msg(hwfn, ptt, i))
3761 continue;
3762
3763 qed_iov_process_mbx_req(hwfn, ptt, i);
3764 }
3765
3766 qed_ptt_release(hwfn, ptt);
3767}
3768
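/* Propagate PF-enforced unicast configuration - forced MAC and forced vlan -
 * into the VFs' bulletin boards, scheduling a bulletin post on any change.
 */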
Yuval Mintz08feecd2016-05-11 16:36:20 +03003769static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
3770{
3771 int i;
3772
3773 qed_for_each_vf(hwfn, i) {
3774 struct qed_public_vf_info *info;
3775 bool update = false;
Yuval Mintzeff16962016-05-11 16:36:21 +03003776 u8 *mac;
Yuval Mintz08feecd2016-05-11 16:36:20 +03003777
3778 info = qed_iov_get_public_vf_info(hwfn, i, true);
3779 if (!info)
3780 continue;
3781
3782 /* Update data on bulletin board */
Yuval Mintzeff16962016-05-11 16:36:21 +03003783 mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
3784 if (is_valid_ether_addr(info->forced_mac) &&
3785 (!mac || !ether_addr_equal(mac, info->forced_mac))) {
3786 DP_VERBOSE(hwfn,
3787 QED_MSG_IOV,
3788 "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
3789 i,
3790 hwfn->cdev->p_iov_info->first_vf_in_pf + i);
3791
3792 /* Update bulletin board with forced MAC */
3793 qed_iov_bulletin_set_forced_mac(hwfn,
3794 info->forced_mac, i);
3795 update = true;
3796 }
Yuval Mintz08feecd2016-05-11 16:36:20 +03003797
3798 if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
3799 info->forced_vlan) {
3800 DP_VERBOSE(hwfn,
3801 QED_MSG_IOV,
3802 "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
3803 info->forced_vlan,
3804 i,
3805 hwfn->cdev->p_iov_info->first_vf_in_pf + i);
3806 qed_iov_bulletin_set_forced_vlan(hwfn,
3807 info->forced_vlan, i);
3808 update = true;
3809 }
3810
3811 if (update)
3812 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
3813 }
3814}
3815
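/* Post the bulletin board of every VF; if a PTT entry cannot be acquired the
 * task is re-scheduled instead.
 */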
Yuval Mintz36558c32016-05-11 16:36:17 +03003816static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
3817{
3818 struct qed_ptt *ptt;
3819 int i;
3820
3821 ptt = qed_ptt_acquire(hwfn);
3822 if (!ptt) {
3823 DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
3824 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
3825 return;
3826 }
3827
3828 qed_for_each_vf(hwfn, i)
3829 qed_iov_post_vf_bulletin(hwfn, i, ptt);
3830
3831 qed_ptt_release(hwfn, ptt);
3832}
3833
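/* Deferred IOV work on behalf of the PF - depending on the flags that were
 * set, handle VF FLR cleanup, process VF mailbox messages, sync forced
 * unicast configuration and post updated bulletin boards.
 */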
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003834void qed_iov_pf_task(struct work_struct *work)
3835{
3836 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
3837 iov_task.work);
Yuval Mintz0b55e272016-05-11 16:36:15 +03003838 int rc;
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003839
3840 if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
3841 return;
3842
Yuval Mintz0b55e272016-05-11 16:36:15 +03003843 if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
3844 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
3845
3846 if (!ptt) {
3847 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
3848 return;
3849 }
3850
3851 rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
3852 if (rc)
3853 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
3854
3855 qed_ptt_release(hwfn, ptt);
3856 }
3857
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003858 if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
3859 qed_handle_vf_msg(hwfn);
Yuval Mintz08feecd2016-05-11 16:36:20 +03003860
3861 if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
3862 &hwfn->iov_task_flags))
3863 qed_handle_pf_set_vf_unicast(hwfn);
3864
Yuval Mintz36558c32016-05-11 16:36:17 +03003865 if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
3866 &hwfn->iov_task_flags))
3867 qed_handle_bulletin_post(hwfn);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003868}
3869
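/* Flush and destroy the per-hwfn IOV workqueues; when 'schedule_first' is
 * set, raise the stop flag and cancel any pending work beforehand.
 */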
3870void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
3871{
3872 int i;
3873
3874 for_each_hwfn(cdev, i) {
3875 if (!cdev->hwfns[i].iov_wq)
3876 continue;
3877
3878 if (schedule_first) {
3879 qed_schedule_iov(&cdev->hwfns[i],
3880 QED_IOV_WQ_STOP_WQ_FLAG);
3881 cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
3882 }
3883
3884 flush_workqueue(cdev->hwfns[i].iov_wq);
3885 destroy_workqueue(cdev->hwfns[i].iov_wq);
3886 }
3887}
3888
3889int qed_iov_wq_start(struct qed_dev *cdev)
3890{
3891 char name[NAME_SIZE];
3892 int i;
3893
3894 for_each_hwfn(cdev, i) {
3895 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3896
Yuval Mintz36558c32016-05-11 16:36:17 +03003897		/* PFs need a dedicated workqueue only if they support IOV.
3898 * VFs always require one.
3899 */
3900 if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003901 continue;
3902
3903 snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
3904 cdev->pdev->bus->number,
3905 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
3906
3907 p_hwfn->iov_wq = create_singlethread_workqueue(name);
3908 if (!p_hwfn->iov_wq) {
3909 DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
3910 return -ENOMEM;
3911 }
3912
Yuval Mintz36558c32016-05-11 16:36:17 +03003913 if (IS_PF(cdev))
3914 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
3915 else
3916 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003917 }
3918
3919 return 0;
3920}
Yuval Mintz0b55e272016-05-11 16:36:15 +03003921
3922const struct qed_iov_hv_ops qed_iov_ops_pass = {
3923 .configure = &qed_sriov_configure,
Yuval Mintzeff16962016-05-11 16:36:21 +03003924 .set_mac = &qed_sriov_pf_set_mac,
Yuval Mintz08feecd2016-05-11 16:36:20 +03003925 .set_vlan = &qed_sriov_pf_set_vlan,
Yuval Mintz73390ac2016-05-11 16:36:24 +03003926 .get_config = &qed_get_vf_config,
Yuval Mintz733def62016-05-11 16:36:22 +03003927 .set_link_state = &qed_set_vf_link_state,
Yuval Mintz6ddc7602016-05-11 16:36:23 +03003928 .set_spoof = &qed_spoof_configure,
Yuval Mintz733def62016-05-11 16:36:22 +03003929 .set_rate = &qed_set_vf_rate,
Yuval Mintz0b55e272016-05-11 16:36:15 +03003930};
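/* Usage note: these callbacks are meant to be invoked by the protocol driver
 * built on top of qed, e.g. to back its .ndo_set_vf_* / .ndo_get_vf_config
 * operations and its PCI sriov_configure hook. As an illustration only -
 * the 'edev' layout below is a hypothetical placeholder, not part of this
 * driver - a caller holding this ops structure could forward an
 * ndo_set_vf_mac request roughly as follows:
 *
 *	static int example_ndo_set_vf_mac(struct net_device *ndev,
 *					  int vfid, u8 *mac)
 *	{
 *		struct example_dev *edev = netdev_priv(ndev);
 *
 *		return edev->iov_ops->set_mac(edev->cdev, mac, vfid);
 *	}
 */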