/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

/* IOV ramrods */
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 fp_minor;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_ETH_ROCE:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		return -EINVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR,
			   fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
			  u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
				  int rel_vf_id,
				  bool b_enabled_only, bool b_non_malicious)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
	    b_non_malicious)
		return false;

	return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				  b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}

enum qed_iov_validate_q_mode {
	QED_IOV_VALIDATE_Q_NA,
	QED_IOV_VALIDATE_Q_ENABLE,
	QED_IOV_VALIDATE_Q_DISABLE,
};

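/* Check whether VF queue @qid is in the state requested by @mode;
 * QED_IOV_VALIDATE_Q_NA accepts any state, while the ENABLE/DISABLE modes
 * test whether a Tx/Rx queue-cid is currently allocated for that queue.
 */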
static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf,
					u16 qid,
					enum qed_iov_validate_q_mode mode,
					bool b_is_tx)
{
	if (mode == QED_IOV_VALIDATE_Q_NA)
		return true;

	if ((b_is_tx && p_vf->vf_queues[qid].p_tx_cid) ||
	    (!b_is_tx && p_vf->vf_queues[qid].p_rx_cid))
		return mode == QED_IOV_VALIDATE_Q_ENABLE;

	/* In case we haven't found any valid cid, then it's disabled */
	return mode == QED_IOV_VALIDATE_Q_DISABLE;
}

static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 rx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (rx_qid >= p_vf->num_rxqs) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
}

static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 tx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (tx_qid >= p_vf->num_txqs) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
}

static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *p_vf, u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}

static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_rxqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						false))
			return true;

	return false;
}

static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_txqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						true))
			return true;

	return false;
}

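/* Bump the bulletin board version, recompute its CRC and DMA the updated
 * contents into the VF's copy of the bulletin.
 */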
static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
				    int vfid, struct qed_ptt *p_ptt)
{
	struct qed_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct qed_dmae_params params;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return -EINVAL;

	if (!p_vf->vf_bulletin)
		return -EINVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
				p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	memset(&params, 0, sizeof(params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				  &params);
}

static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return 0;
}

static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct qed_igu_block *p_sb;
	u16 sb_id;
	u32 val;

	if (!p_hwfn->hw_info.p_igu_info) {
		DP_ERR(p_hwfn,
		       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
		return;
	}

	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     sb_id++) {
		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
		if ((p_sb->status & QED_IGU_STATUS_FREE) &&
		    !(p_sb->status & QED_IGU_STATUS_PF)) {
			val = qed_rd(p_hwfn, p_ptt,
				     IGU_REG_MAPPING_MEMORY + sb_id * 4);
			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
		}
	}
}

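/* Initialize the per-VF database - carve each VF's mailbox request/reply
 * and bulletin areas out of the pre-allocated DMA buffers, and derive its
 * relative/absolute IDs, concrete/opaque FIDs and default filter counts.
 */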
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "qed_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;
		vf->b_init = false;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;

		vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
	}
}

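/* Allocate the DMA-coherent buffers backing the per-VF mailbox requests,
 * mailbox replies and bulletin boards for all VFs of this PF.
 */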
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

	return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}

int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov)
		return -ENOMEM;

	p_hwfn->pf_iov_info = p_sriov;

	return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
	qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}

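/* Learn the PF's SR-IOV capability from PCI config space and derive the
 * index of its first VF; p_iov_info is kept only if the capability
 * actually publishes VFs.
 */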
int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info)
		return -ENOMEM;

	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* First VF index based on offset is tricky:
	 * - If ARI is supported [likely], offset - (16 - pf_id) would
	 *   provide the number for eng0. 2nd engine Vfs would begin
	 *   after the first engine's VFs.
	 * - If !ARI, VFs would start on next device.
	 *   so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by later
	 * to differentiate between the two.
	 */

	if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		cdev->p_iov_info->first_vf_in_pf = first;

		if (QED_PATH_ID(p_hwfn))
			cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
	} else {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		cdev->p_iov_info->first_vf_in_pf = first;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}

bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
			      int vfid, bool b_fail_malicious)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
		return false;

	return true;
}

bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
	struct qed_vf_info *vf;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
	u16 i;

	if (!IS_QED_SRIOV(cdev))
		return;

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}

static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, true);
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}

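/* MSI-X configuration is per-PF on AH and newer devices; find the maximum
 * SB count among the currently enabled child VFs and request a new MFW
 * configuration only when the VF being enabled needs more than that.
 */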
static int
qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
{
	u8 current_max = 0;
	int i;

	/* For AH onward, configuration is per-PF. Find maximum of all
	 * the currently enabled child VFs, and set the number to be that.
	 */
	if (!QED_IS_BB(p_hwfn->cdev)) {
		qed_for_each_vf(p_hwfn, i) {
			struct qed_vf_info *p_vf;

			p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
			if (!p_vf)
				continue;

			current_max = max_t(u8, current_max, p_vf->num_sbs);
		}
	}

	if (num_sbs > current_max)
		return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
					      abs_vf_id, num_sbs);

	return 0;
}

static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc;

	/* It's possible VF was previously considered malicious -
	 * clear the indication even if we're only going to disable VF.
	 */
	vf->b_malicious = false;

	if (vf->to_disable)
		return 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
					   vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	vf->state = VF_FREE;

	return rc;
}

/**
 * @brief qed_iov_config_perm_table - configure the permission
 *        zone table.
 *        In E4, queue zone permission table size is 320x9. There
 *        are 320 VF queues for single engine device (256 for dual
 *        engine device), and each entry has the following format:
 *        {Valid, VF[7:0]}
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}

static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}

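/* Assign free IGU blocks to the VF's Rx queues, map them to the VF in the
 * IGU and CAU, and return the number of status blocks actually given,
 * which may be less than requested.
 */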
static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *igu_blocks;
	int qid = 0, igu_id = 0;
	u32 val = 0;

	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	while ((qid < num_rx_queues) &&
	       (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
		if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
			struct cau_sb_entry sb_entry;

			vf->igu_sbs[qid] = (u16)igu_id;
			igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;

			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
			       val);

			/* Configure igu sb in CAU which were marked valid */
			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_hwfn->rel_pf_id,
					      vf->abs_vf_id, 1);
			qed_dmae_host2grc(p_hwfn, p_ptt,
					  (u64)(uintptr_t)&sb_entry,
					  CAU_REG_SB_VAR_MEMORY +
					  igu_id * sizeof(u64), 2, 0);
			qid++;
		}
		igu_id++;
	}

	vf->num_sbs = (u8) num_rx_queues;

	return vf->num_sbs;
}

static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->igu_map.igu_blocks[igu_id].status |=
		    QED_IGU_STATUS_FREE;

		p_hwfn->hw_info.p_igu_info->free_blks++;
	}

	vf->num_sbs = 0;
}

static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}

static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_iov_vf_init_params *p_params)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params link_params;
	struct qed_mcp_link_state link_state;
	u8 num_of_vf_avaiable_chains = 0;
	struct qed_vf_info *vf = NULL;
	u16 qid, num_irqs;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);
		return -EINVAL;
	}

	/* Perform sanity checking on the requested queue_id */
	for (i = 0; i < p_params->num_queues; i++) {
		u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
		u16 max_vf_qzone = min_vf_qzone +
				   FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;

		qid = p_params->req_rx_queue[i];
		if (qid < min_vf_qzone || qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
				  qid,
				  p_params->rel_vf_id,
				  min_vf_qzone, max_vf_qzone);
			return -EINVAL;
		}

		qid = p_params->req_tx_queue[i];
		if (qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
				  qid, p_params->rel_vf_id, max_vf_qzone);
			return -EINVAL;
		}

		/* If client *really* wants, Tx qid can be shared with PF */
		if (qid < min_vf_qzone)
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
				   p_params->rel_vf_id, qid, i);
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							     p_ptt,
							     vf, num_irqs);
	if (!num_of_vf_avaiable_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_avaiable_chains;
	vf->num_txqs = num_of_vf_avaiable_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct qed_vf_q_info *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		/* CIDs are per-VF, so no problem having them 0-based. */
		p_queue->fw_cid = i;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x] CID %04x\n",
			   vf->relative_vf_id,
			   i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid,
			   p_queue->fw_tx_qid, p_queue->fw_cid);
	}

	/* Update the link configuration in bulletin */
	memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
	       sizeof(link_params));
	memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
	memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
	       sizeof(link_caps));
	qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
			 &link_params, &link_state, &link_caps);

	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}

static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->bulletin.p_virt)
		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	memset(&vf->acquire, 0, sizeof(vf->acquire));

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs--;
	}

	return 0;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}

static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

	/* Once PF copies the rc to the VF, the latter can continue
	 * and send an additional message. So we have to make sure the
	 * channel would be re-set to ready prior to that.
	 */
	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);
}

static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
				enum qed_iov_vport_update_flag flag)
{
	switch (flag) {
	case QED_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case QED_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case QED_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case QED_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case QED_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case QED_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}

static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_iov_vf_mbx *p_mbx,
					    u8 status,
					    u16 tlvs_mask, u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & BIT(i)))
			continue;

		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
				   qed_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & BIT(i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	return total_len;
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

static struct
qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return NULL;

	return &vf->p_vf_info;
}

static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
	struct qed_public_vf_info *vf_info;

	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);

	if (!vf_info)
		return;

	/* Clear the VF mac */
	eth_zero_addr(vf_info->mac);

	vf_info->rx_accept_mode = 0;
	vf_info->tx_accept_mode = 0;
}

static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf)
{
	u32 i;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
		struct qed_vf_q_info *p_queue = &p_vf->vf_queues[i];

		if (p_queue->p_rx_cid) {
			qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
			p_queue->p_rx_cid = NULL;
		}

		if (p_queue->p_tx_cid) {
			qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
			p_queue->p_tx_cid = NULL;
		}
	}

	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}

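/* Fill the resource portion of the ACQUIRE response and check it satisfies
 * the VF's request; legacy Windows VFs that can't handle a resource
 * negotiation failure are answered with SUCCESS instead of NO_RESOURCE.
 */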
static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *p_vf,
				      struct vf_pf_resc_request *p_req,
				      struct pf_vf_resc *p_resp)
{
	int i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				(u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
	}

	/* Filter related information */
	p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
					p_req->num_mac_filters);
	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
					 p_req->num_vlan_filters);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = QED_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs,
			   p_resp->num_rxqs,
			   p_req->num_rxqs,
			   p_resp->num_txqs,
			   p_req->num_sbs,
			   p_resp->num_sbs,
			   p_req->num_mac_filters,
			   p_resp->num_mac_filters,
			   p_req->num_vlan_filters,
			   p_resp->num_vlan_filters,
			   p_req->num_mc_filters, p_resp->num_mc_filters);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}

static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
					 struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  offsetof(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  offsetof(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  offsetof(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}

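/* Handle a VF's ACQUIRE mailbox request - validate fastpath HSI and 100g
 * compatibility, store the request, negotiate resources, start the VF in
 * FW and reply with the PF/resource information and agreed bulletin size.
 */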
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001487static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
1488 struct qed_ptt *p_ptt,
1489 struct qed_vf_info *vf)
Yuval Mintz37bff2b2016-05-11 16:36:13 +03001490{
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001491 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1492 struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1493 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1494 struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
Yuval Mintz1cf2b1a2016-06-05 13:11:12 +03001495 u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001496 struct pf_vf_resc *resc = &resp->resc;
Yuval Mintz1fe614d2016-06-05 13:11:11 +03001497 int rc;
1498
1499 memset(resp, 0, sizeof(*resp));
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001500
Yuval Mintz05fafbf2016-08-19 09:33:31 +03001501 /* Write the PF version so that VF would know which version
1502 * is supported - might be later overriden. This guarantees that
1503 * VF could recognize legacy PF based on lack of versions in reply.
1504 */
1505 pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1506 pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1507
Yuval Mintza044df82016-08-22 13:25:09 +03001508 if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
1509 DP_VERBOSE(p_hwfn,
1510 QED_MSG_IOV,
1511 "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
1512 vf->abs_vf_id, vf->state);
1513 goto out;
1514 }
1515
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001516 /* Validate FW compatibility */
Yuval Mintz1fe614d2016-06-05 13:11:11 +03001517 if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
Yuval Mintza044df82016-08-22 13:25:09 +03001518 if (req->vfdev_info.capabilities &
1519 VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
1520 struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
Yuval Mintz1fe614d2016-06-05 13:11:11 +03001521
Yuval Mintza044df82016-08-22 13:25:09 +03001522 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1523 "VF[%d] is pre-fastpath HSI\n",
1524 vf->abs_vf_id);
1525 p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
1526 p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1527 } else {
1528 DP_INFO(p_hwfn,
1529 "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
1530 vf->abs_vf_id,
1531 req->vfdev_info.eth_fp_hsi_major,
1532 req->vfdev_info.eth_fp_hsi_minor,
1533 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1534
1535 goto out;
1536 }
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001537 }
1538
1539 /* On 100g PFs, prevent old VFs from loading */
1540 if ((p_hwfn->cdev->num_hwfns > 1) &&
1541 !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1542 DP_INFO(p_hwfn,
1543 "VF[%d] is running an old driver that doesn't support 100g\n",
1544 vf->abs_vf_id);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001545 goto out;
1546 }
1547
Yuval Mintz1fe614d2016-06-05 13:11:11 +03001548 /* Store the acquire message */
1549 memcpy(&vf->acquire, req, sizeof(vf->acquire));
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001550
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001551 vf->opaque_fid = req->vfdev_info.opaque_fid;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001552
1553 vf->vf_bulletin = req->bulletin_addr;
1554 vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1555 vf->bulletin.size : req->bulletin_size;
1556
1557 /* fill in pfdev info */
1558 pfdev_info->chip_num = p_hwfn->cdev->chip_num;
1559 pfdev_info->db_size = 0;
1560 pfdev_info->indices_per_sb = PIS_PER_SB;
1561
1562 pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1563 PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1564 if (p_hwfn->cdev->num_hwfns > 1)
1565 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1566
Yuval Mintz1cf2b1a2016-06-05 13:11:12 +03001567 qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001568
1569 memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
1570
1571 pfdev_info->fw_major = FW_MAJOR_VERSION;
1572 pfdev_info->fw_minor = FW_MINOR_VERSION;
1573 pfdev_info->fw_rev = FW_REVISION_VERSION;
1574 pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
Yuval Mintza044df82016-08-22 13:25:09 +03001575
1576 /* Incorrect when legacy, but doesn't matter as legacy isn't reading
1577 * this field.
1578 */
Yuval Mintz1a635e42016-08-15 10:42:43 +03001579 pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
Yuval Mintz1fe614d2016-06-05 13:11:11 +03001580 req->vfdev_info.eth_fp_hsi_minor);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001581 pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
1582 qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
1583
1584 pfdev_info->dev_type = p_hwfn->cdev->type;
1585 pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
1586
Yuval Mintz1cf2b1a2016-06-05 13:11:12 +03001587 /* Fill resources available to VF; Make sure there are enough to
1588 * satisfy the VF's request.
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001589 */
Yuval Mintz1cf2b1a2016-06-05 13:11:12 +03001590 vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1591 &req->resc_request, resc);
1592 if (vfpf_status != PFVF_STATUS_SUCCESS)
1593 goto out;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001594
Yuval Mintz1fe614d2016-06-05 13:11:11 +03001595 /* Start the VF in FW */
1596 rc = qed_sp_vf_start(p_hwfn, vf);
1597 if (rc) {
1598 DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
1599 vfpf_status = PFVF_STATUS_FAILURE;
1600 goto out;
1601 }
1602
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001603 /* Fill agreed size of bulletin board in response */
1604 resp->bulletin_size = vf->bulletin.size;
Yuval Mintz36558c32016-05-11 16:36:17 +03001605 qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001606
1607 DP_VERBOSE(p_hwfn,
1608 QED_MSG_IOV,
1609 "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
1610 "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
1611 vf->abs_vf_id,
1612 resp->pfdev_info.chip_num,
1613 resp->pfdev_info.db_size,
1614 resp->pfdev_info.indices_per_sb,
1615 resp->pfdev_info.capabilities,
1616 resc->num_rxqs,
1617 resc->num_txqs,
1618 resc->num_sbs,
1619 resc->num_mac_filters,
1620 resc->num_vlan_filters);
1621 vf->state = VF_ACQUIRED;
1622
1623 /* Prepare Response */
1624out:
1625 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1626 sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03001627}
1628
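/* Configure anti-spoofing on the VF's vport via a vport-update ramrod,
 * caching the requested value on success.
 */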
Yuval Mintz6ddc7602016-05-11 16:36:23 +03001629static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
1630 struct qed_vf_info *p_vf, bool val)
1631{
1632 struct qed_sp_vport_update_params params;
1633 int rc;
1634
1635 if (val == p_vf->spoof_chk) {
1636 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1637 "Spoofchk value[%d] is already configured\n", val);
1638 return 0;
1639 }
1640
1641 memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
1642 params.opaque_fid = p_vf->opaque_fid;
1643 params.vport_id = p_vf->vport_id;
1644 params.update_anti_spoofing_en_flg = 1;
1645 params.anti_spoofing_en = val;
1646
1647 rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
Yuval Mintzcb1fa082016-07-27 14:45:20 +03001648 if (!rc) {
Yuval Mintz6ddc7602016-05-11 16:36:23 +03001649 p_vf->spoof_chk = val;
1650 p_vf->req_spoofchk_val = p_vf->spoof_chk;
1651 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1652 "Spoofchk val[%d] configured\n", val);
1653 } else {
1654 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1655 "Spoofchk configuration[val:%d] failed for VF[%d]\n",
1656 val, p_vf->relative_vf_id);
1657 }
1658
1659 return rc;
1660}
1661
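/* Re-add every VLAN filter recorded in the VF's shadow configuration. */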
Yuval Mintz08feecd2016-05-11 16:36:20 +03001662static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
1663 struct qed_vf_info *p_vf)
1664{
1665 struct qed_filter_ucast filter;
1666 int rc = 0;
1667 int i;
1668
1669 memset(&filter, 0, sizeof(filter));
1670 filter.is_rx_filter = 1;
1671 filter.is_tx_filter = 1;
1672 filter.vport_to_add_to = p_vf->vport_id;
1673 filter.opcode = QED_FILTER_ADD;
1674
1675 /* Reconfigure vlans */
1676 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
1677 if (!p_vf->shadow_config.vlans[i].used)
1678 continue;
1679
1680 filter.type = QED_FILTER_VLAN;
1681 filter.vlan = p_vf->shadow_config.vlans[i].vid;
Yuval Mintz1a635e42016-08-15 10:42:43 +03001682 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
Yuval Mintz08feecd2016-05-11 16:36:20 +03001683 "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
1684 filter.vlan, p_vf->relative_vf_id);
Yuval Mintz1a635e42016-08-15 10:42:43 +03001685 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1686 &filter, QED_SPQ_MODE_CB, NULL);
Yuval Mintz08feecd2016-05-11 16:36:20 +03001687 if (rc) {
1688 DP_NOTICE(p_hwfn,
1689 "Failed to configure VLAN [%04x] to VF [%04x]\n",
1690 filter.vlan, p_vf->relative_vf_id);
1691 break;
1692 }
1693 }
1694
1695 return rc;
1696}
1697
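/* Restore the shadow VLAN configuration once a forced-VLAN setting is
 * no longer in effect.
 */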
1698static int
1699qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
1700 struct qed_vf_info *p_vf, u64 events)
1701{
1702 int rc = 0;
1703
Yuval Mintz1a635e42016-08-15 10:42:43 +03001704 if ((events & BIT(VLAN_ADDR_FORCED)) &&
Yuval Mintz08feecd2016-05-11 16:36:20 +03001705 !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
1706 rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
1707
1708 return rc;
1709}
1710
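/* Apply hypervisor-forced MAC/VLAN settings to the VF's vport and update
 * its Rx queues accordingly.
 */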
1711static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
1712 struct qed_vf_info *p_vf, u64 events)
1713{
1714 int rc = 0;
1715 struct qed_filter_ucast filter;
1716
1717 if (!p_vf->vport_instance)
1718 return -EINVAL;
1719
Yuval Mintz1a635e42016-08-15 10:42:43 +03001720 if (events & BIT(MAC_ADDR_FORCED)) {
Yuval Mintzeff16962016-05-11 16:36:21 +03001721 /* Since there's no way [currently] of removing the MAC,
1722 * we can always assume this means we need to force it.
1723 */
1724 memset(&filter, 0, sizeof(filter));
1725 filter.type = QED_FILTER_MAC;
1726 filter.opcode = QED_FILTER_REPLACE;
1727 filter.is_rx_filter = 1;
1728 filter.is_tx_filter = 1;
1729 filter.vport_to_add_to = p_vf->vport_id;
1730 ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
1731
1732 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1733 &filter, QED_SPQ_MODE_CB, NULL);
1734 if (rc) {
1735 DP_NOTICE(p_hwfn,
1736 "PF failed to configure MAC for VF\n");
1737 return rc;
1738 }
1739
1740 p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
1741 }
1742
Yuval Mintz1a635e42016-08-15 10:42:43 +03001743 if (events & BIT(VLAN_ADDR_FORCED)) {
Yuval Mintz08feecd2016-05-11 16:36:20 +03001744 struct qed_sp_vport_update_params vport_update;
1745 u8 removal;
1746 int i;
1747
1748 memset(&filter, 0, sizeof(filter));
1749 filter.type = QED_FILTER_VLAN;
1750 filter.is_rx_filter = 1;
1751 filter.is_tx_filter = 1;
1752 filter.vport_to_add_to = p_vf->vport_id;
1753 filter.vlan = p_vf->bulletin.p_virt->pvid;
1754 filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
1755 QED_FILTER_FLUSH;
1756
1757 /* Send the ramrod */
1758 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1759 &filter, QED_SPQ_MODE_CB, NULL);
1760 if (rc) {
1761 DP_NOTICE(p_hwfn,
1762 "PF failed to configure VLAN for VF\n");
1763 return rc;
1764 }
1765
1766 /* Update the default-vlan & silent vlan stripping */
1767 memset(&vport_update, 0, sizeof(vport_update));
1768 vport_update.opaque_fid = p_vf->opaque_fid;
1769 vport_update.vport_id = p_vf->vport_id;
1770 vport_update.update_default_vlan_enable_flg = 1;
1771 vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
1772 vport_update.update_default_vlan_flg = 1;
1773 vport_update.default_vlan = filter.vlan;
1774
1775 vport_update.update_inner_vlan_removal_flg = 1;
1776 removal = filter.vlan ? 1
1777 : p_vf->shadow_config.inner_vlan_removal;
1778 vport_update.inner_vlan_removal_flg = removal;
1779 vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
1780 rc = qed_sp_vport_update(p_hwfn,
1781 &vport_update,
1782 QED_SPQ_MODE_EBLOCK, NULL);
1783 if (rc) {
1784 DP_NOTICE(p_hwfn,
1785 "PF failed to configure VF vport for vlan\n");
1786 return rc;
1787 }
1788
1789 /* Update all the Rx queues */
1790 for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
Mintz, Yuval3da7a372016-11-29 16:47:06 +02001791 struct qed_queue_cid *p_cid;
Yuval Mintz08feecd2016-05-11 16:36:20 +03001792
Mintz, Yuval3da7a372016-11-29 16:47:06 +02001793 p_cid = p_vf->vf_queues[i].p_rx_cid;
1794 if (!p_cid)
Yuval Mintz08feecd2016-05-11 16:36:20 +03001795 continue;
1796
Mintz, Yuval3da7a372016-11-29 16:47:06 +02001797 rc = qed_sp_eth_rx_queues_update(p_hwfn,
1798 (void **)&p_cid,
Yuval Mintz08feecd2016-05-11 16:36:20 +03001799 1, 0, 1,
1800 QED_SPQ_MODE_EBLOCK,
1801 NULL);
1802 if (rc) {
1803 DP_NOTICE(p_hwfn,
1804 "Failed to send Rx update for queue[0x%04x]\n",
Mintz, Yuval3da7a372016-11-29 16:47:06 +02001805 p_cid->rel.queue_id);
Yuval Mintz08feecd2016-05-11 16:36:20 +03001806 return rc;
1807 }
1808 }
1809
1810 if (filter.vlan)
1811 p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
1812 else
Yuval Mintz1a635e42016-08-15 10:42:43 +03001813 p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
Yuval Mintz08feecd2016-05-11 16:36:20 +03001814 }
1815
1816 /* If forced features are terminated, we need to reapply the shadow
1817 * configuration.
1818 */
1819 if (events)
1820 qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
1821
1822 return rc;
1823}
1824
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001825static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
1826 struct qed_ptt *p_ptt,
1827 struct qed_vf_info *vf)
1828{
1829 struct qed_sp_vport_start_params params = { 0 };
1830 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1831 struct vfpf_vport_start_tlv *start;
1832 u8 status = PFVF_STATUS_SUCCESS;
1833 struct qed_vf_info *vf_info;
Yuval Mintz08feecd2016-05-11 16:36:20 +03001834 u64 *p_bitmap;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001835 int sb_id;
1836 int rc;
1837
1838 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
1839 if (!vf_info) {
1840 DP_NOTICE(p_hwfn->cdev,
1841 "Failed to get VF info, invalid vfid [%d]\n",
1842 vf->relative_vf_id);
1843 return;
1844 }
1845
1846 vf->state = VF_ENABLED;
1847 start = &mbx->req_virt->start_vport;
1848
Mintz, Yuvalb801b152017-03-19 13:08:15 +02001849 qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
1850
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001851 /* Initialize Status block in CAU */
1852 for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
1853 if (!start->sb_addr[sb_id]) {
1854 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1855 "VF[%d] did not fill the address of SB %d\n",
1856 vf->relative_vf_id, sb_id);
1857 break;
1858 }
1859
1860 qed_int_cau_conf_sb(p_hwfn, p_ptt,
1861 start->sb_addr[sb_id],
Yuval Mintz1a635e42016-08-15 10:42:43 +03001862 vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001863 }
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001864
1865 vf->mtu = start->mtu;
Yuval Mintz08feecd2016-05-11 16:36:20 +03001866 vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
1867
1868 /* Take into account configuration forced by the hypervisor;
1869 * if none is configured, use the values supplied by the VF [still
1870 * fine for old VFs, since they passed '0' as padding].
1871 */
1872 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
Yuval Mintz1a635e42016-08-15 10:42:43 +03001873 if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
Yuval Mintz08feecd2016-05-11 16:36:20 +03001874 u8 vf_req = start->only_untagged;
1875
1876 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
1877 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
1878 }
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001879
1880 params.tpa_mode = start->tpa_mode;
1881 params.remove_inner_vlan = start->inner_vlan_removal;
Yuval Mintz831bfb0e2016-05-11 16:36:25 +03001882 params.tx_switching = true;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001883
Yuval Mintz08feecd2016-05-11 16:36:20 +03001884 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001885 params.drop_ttl0 = false;
1886 params.concrete_fid = vf->concrete_fid;
1887 params.opaque_fid = vf->opaque_fid;
1888 params.vport_id = vf->vport_id;
1889 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1890 params.mtu = vf->mtu;
Yuval Mintz11a85d72016-08-22 13:25:10 +03001891 params.check_mac = true;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001892
1893 rc = qed_sp_eth_vport_start(p_hwfn, &params);
Yuval Mintz1a635e42016-08-15 10:42:43 +03001894 if (rc) {
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001895 DP_ERR(p_hwfn,
1896 "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
1897 status = PFVF_STATUS_FAILURE;
1898 } else {
1899 vf->vport_instance++;
Yuval Mintz08feecd2016-05-11 16:36:20 +03001900
1901 /* Force configuration if needed on the newly opened vport */
1902 qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
Yuval Mintz6ddc7602016-05-11 16:36:23 +03001903
1904 __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001905 }
1906 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
1907 sizeof(struct pfvf_def_resp_tlv), status);
1908}
1909
1910static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
1911 struct qed_ptt *p_ptt,
1912 struct qed_vf_info *vf)
1913{
1914 u8 status = PFVF_STATUS_SUCCESS;
1915 int rc;
1916
1917 vf->vport_instance--;
Yuval Mintz6ddc7602016-05-11 16:36:23 +03001918 vf->spoof_chk = false;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001919
Mintz, Yuvalf109c242017-03-19 13:08:16 +02001920 if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
1921 (qed_iov_validate_active_txq(p_hwfn, vf))) {
1922 vf->b_malicious = true;
1923 DP_NOTICE(p_hwfn,
1924 "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
1925 vf->abs_vf_id);
1926 status = PFVF_STATUS_MALICIOUS;
1927 goto out;
1928 }
1929
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001930 rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
Yuval Mintz1a635e42016-08-15 10:42:43 +03001931 if (rc) {
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001932 DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
1933 rc);
1934 status = PFVF_STATUS_FAILURE;
1935 }
1936
Yuval Mintz08feecd2016-05-11 16:36:20 +03001937 /* Forget the configuration on the vport */
1938 vf->configured_features = 0;
1939 memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
1940
Mintz, Yuvalf109c242017-03-19 13:08:16 +02001941out:
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001942 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
1943 sizeof(struct pfvf_def_resp_tlv), status);
1944}
1945
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001946static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
1947 struct qed_ptt *p_ptt,
Yuval Mintza044df82016-08-22 13:25:09 +03001948 struct qed_vf_info *vf,
1949 u8 status, bool b_legacy)
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001950{
1951 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1952 struct pfvf_start_queue_resp_tlv *p_tlv;
1953 struct vfpf_start_rxq_tlv *req;
Yuval Mintza044df82016-08-22 13:25:09 +03001954 u16 length;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001955
1956 mbx->offset = (u8 *)mbx->reply_virt;
1957
Yuval Mintza044df82016-08-22 13:25:09 +03001958 /* Taking a bigger struct instead of adding a TLV to the list was a
1959 * mistake, but one which we're now stuck with, as some older
1960 * clients assume the size of the previous response.
1961 */
1962 if (!b_legacy)
1963 length = sizeof(*p_tlv);
1964 else
1965 length = sizeof(struct pfvf_def_resp_tlv);
1966
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001967 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
Yuval Mintza044df82016-08-22 13:25:09 +03001968 length);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001969 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1970 sizeof(struct channel_list_end_tlv));
1971
1972 /* Update the TLV with the response */
Yuval Mintza044df82016-08-22 13:25:09 +03001973 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001974 req = &mbx->req_virt->start_rxq;
Yuval Mintz351a4ded2016-06-02 10:23:29 +03001975 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
1976 offsetof(struct mstorm_vf_zone,
1977 non_trigger.eth_rx_queue_producers) +
1978 sizeof(struct eth_rx_prod_data) * req->rx_qid;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001979 }
1980
Yuval Mintza044df82016-08-22 13:25:09 +03001981 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001982}
1983
1984static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
1985 struct qed_ptt *p_ptt,
1986 struct qed_vf_info *vf)
1987{
1988 struct qed_queue_start_common_params params;
1989 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
Yuval Mintz41086462016-06-05 13:11:13 +03001990 u8 status = PFVF_STATUS_NO_RESOURCE;
Mintz, Yuval3da7a372016-11-29 16:47:06 +02001991 struct qed_vf_q_info *p_queue;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001992 struct vfpf_start_rxq_tlv *req;
Yuval Mintza044df82016-08-22 13:25:09 +03001993 bool b_legacy_vf = false;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001994 int rc;
1995
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001996 req = &mbx->req_virt->start_rxq;
Yuval Mintz41086462016-06-05 13:11:13 +03001997
Mintz, Yuvalf109c242017-03-19 13:08:16 +02001998 if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
1999 QED_IOV_VALIDATE_Q_DISABLE) ||
Yuval Mintz41086462016-06-05 13:11:13 +03002000 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2001 goto out;
2002
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002003 /* Acquire a new queue-cid */
2004 p_queue = &vf->vf_queues[req->rx_qid];
2005
2006 memset(&params, 0, sizeof(params));
2007 params.queue_id = p_queue->fw_rx_qid;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002008 params.vport_id = vf->vport_id;
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002009 params.stats_id = vf->abs_vf_id + 0x10;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002010 params.sb = req->hw_sb;
2011 params.sb_idx = req->sb_index;
2012
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002013 p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn,
2014 vf->opaque_fid,
2015 p_queue->fw_cid,
2016 req->rx_qid, &params);
2017 if (!p_queue->p_rx_cid)
2018 goto out;
2019
Yuval Mintza044df82016-08-22 13:25:09 +03002020 /* Legacy VFs have their Producers in a different location, which they
2021 * calculate on their own and clean the producer prior to this.
2022 */
2023 if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2024 ETH_HSI_VER_NO_PKT_LEN_TUNN) {
2025 b_legacy_vf = true;
2026 } else {
2027 REG_WR(p_hwfn,
2028 GTT_BAR0_MAP_REG_MSDM_RAM +
2029 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2030 0);
2031 }
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002032 p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
Yuval Mintza044df82016-08-22 13:25:09 +03002033
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002034 rc = qed_eth_rxq_start_ramrod(p_hwfn,
2035 p_queue->p_rx_cid,
2036 req->bd_max_bytes,
2037 req->rxq_addr,
2038 req->cqe_pbl_addr, req->cqe_pbl_size);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002039 if (rc) {
2040 status = PFVF_STATUS_FAILURE;
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002041 qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
2042 p_queue->p_rx_cid = NULL;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002043 } else {
Yuval Mintz41086462016-06-05 13:11:13 +03002044 status = PFVF_STATUS_SUCCESS;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002045 vf->num_active_rxqs++;
2046 }
2047
Yuval Mintz41086462016-06-05 13:11:13 +03002048out:
Yuval Mintza044df82016-08-22 13:25:09 +03002049 qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002050}
2051
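/* Mirror the PF's current tunnel configuration into the response TLV. */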
Chopra, Manisheaf3c0c2017-04-24 10:00:49 -07002052static void
2053qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2054 struct qed_tunnel_info *p_tun,
2055 u16 tunn_feature_mask)
2056{
2057 p_resp->tunn_feature_mask = tunn_feature_mask;
2058 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2059 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2060 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2061 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2062 p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled;
2063 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2064 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2065 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2066 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2067 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2068 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2069 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2070}
2071
2072static void
2073__qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2074 struct qed_tunn_update_type *p_tun,
2075 enum qed_tunn_mode mask, u8 tun_cls)
2076{
2077 if (p_req->tun_mode_update_mask & BIT(mask)) {
2078 p_tun->b_update_mode = true;
2079
2080 if (p_req->tunn_mode & BIT(mask))
2081 p_tun->b_mode_enabled = true;
2082 }
2083
2084 p_tun->tun_cls = tun_cls;
2085}
2086
2087static void
2088qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2089 struct qed_tunn_update_type *p_tun,
2090 struct qed_tunn_update_udp_port *p_port,
2091 enum qed_tunn_mode mask,
2092 u8 tun_cls, u8 update_port, u16 port)
2093{
2094 if (update_port) {
2095 p_port->b_update_port = true;
2096 p_port->port = port;
2097 }
2098
2099 __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2100}
2101
2102static bool
2103qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2104{
2105 bool b_update_requested = false;
2106
2107 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2108 p_req->update_geneve_port || p_req->update_vxlan_port)
2109 b_update_requested = true;
2110
2111 return b_update_requested;
2112}
2113
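/* Reject a mode-update request that would leave the tunnel mode disabled. */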
2114static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
2115{
2116 if (tun->b_update_mode && !tun->b_mode_enabled) {
2117 tun->b_update_mode = false;
2118 *rc = -EINVAL;
2119 }
2120}
2121
2122static int
2123qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
2124 u16 *tun_features, bool *update,
2125 struct qed_tunnel_info *tun_src)
2126{
2127 struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
2128 struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
2129 u16 bultn_vxlan_port, bultn_geneve_port;
2130 void *cookie = p_hwfn->cdev->ops_cookie;
2131 int i, rc = 0;
2132
2133 *tun_features = p_hwfn->cdev->tunn_feature_mask;
2134 bultn_vxlan_port = tun->vxlan_port.port;
2135 bultn_geneve_port = tun->geneve_port.port;
2136 qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
2137 qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
2138 qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
2139 qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
2140 qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);
2141
2142 if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) &&
2143 (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2144 tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2145 tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2146 tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2147 tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) {
2148 tun_src->b_update_rx_cls = false;
2149 tun_src->b_update_tx_cls = false;
2150 rc = -EINVAL;
2151 }
2152
2153 if (tun_src->vxlan_port.b_update_port) {
2154 if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
2155 tun_src->vxlan_port.b_update_port = false;
2156 } else {
2157 *update = true;
2158 bultn_vxlan_port = tun_src->vxlan_port.port;
2159 }
2160 }
2161
2162 if (tun_src->geneve_port.b_update_port) {
2163 if (tun_src->geneve_port.port == tun->geneve_port.port) {
2164 tun_src->geneve_port.b_update_port = false;
2165 } else {
2166 *update = true;
2167 bultn_geneve_port = tun_src->geneve_port.port;
2168 }
2169 }
2170
2171 qed_for_each_vf(p_hwfn, i) {
2172 qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
2173 bultn_geneve_port);
2174 }
2175
2176 qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
2177 ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port);
2178
2179 return rc;
2180}
2181
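/* Handle a VF's tunnel-parameter update request: validate it, apply it,
 * and report the resulting configuration back to the VF.
 */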
2182static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
2183 struct qed_ptt *p_ptt,
2184 struct qed_vf_info *p_vf)
2185{
2186 struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
2187 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2188 struct pfvf_update_tunn_param_tlv *p_resp;
2189 struct vfpf_update_tunn_param_tlv *p_req;
2190 u8 status = PFVF_STATUS_SUCCESS;
2191 bool b_update_required = false;
2192 struct qed_tunnel_info tunn;
2193 u16 tunn_feature_mask = 0;
2194 int i, rc = 0;
2195
2196 mbx->offset = (u8 *)mbx->reply_virt;
2197
2198 memset(&tunn, 0, sizeof(tunn));
2199 p_req = &mbx->req_virt->tunn_param_update;
2200
2201 if (!qed_iov_pf_validate_tunn_param(p_req)) {
2202 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2203 "No tunnel update requested by VF\n");
2204 status = PFVF_STATUS_FAILURE;
2205 goto send_resp;
2206 }
2207
2208 tunn.b_update_rx_cls = p_req->update_tun_cls;
2209 tunn.b_update_tx_cls = p_req->update_tun_cls;
2210
2211 qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2212 QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2213 p_req->update_vxlan_port,
2214 p_req->vxlan_port);
2215 qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2216 QED_MODE_L2GENEVE_TUNN,
2217 p_req->l2geneve_clss,
2218 p_req->update_geneve_port,
2219 p_req->geneve_port);
2220 __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2221 QED_MODE_IPGENEVE_TUNN,
2222 p_req->ipgeneve_clss);
2223 __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2224 QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
2225 __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2226 QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);
2227
2228 /* If the PF modifies the VF's request, it should still return an
2229 * error in case of a partial or modified configuration, as opposed
2230 * to the one requested.
2231 */
2232 rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
2233 &b_update_required, &tunn);
2234
2235 if (rc)
2236 status = PFVF_STATUS_FAILURE;
2237
2238 /* Does the QED client wish to update anything? */
2239 if (b_update_required) {
2240 u16 geneve_port;
2241
Manish Chopra4f646752017-05-23 09:41:20 +03002242 rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
Chopra, Manisheaf3c0c2017-04-24 10:00:49 -07002243 QED_SPQ_MODE_EBLOCK, NULL);
2244 if (rc)
2245 status = PFVF_STATUS_FAILURE;
2246
2247 geneve_port = p_tun->geneve_port.port;
2248 qed_for_each_vf(p_hwfn, i) {
2249 qed_iov_bulletin_set_udp_ports(p_hwfn, i,
2250 p_tun->vxlan_port.port,
2251 geneve_port);
2252 }
2253 }
2254
2255send_resp:
2256 p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
2257 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2258
2259 qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2260 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2261 sizeof(struct channel_list_end_tlv));
2262
2263 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2264}
2265
Yuval Mintz5040acf2016-06-05 13:11:14 +03002266static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
2267 struct qed_ptt *p_ptt,
2268 struct qed_vf_info *p_vf, u8 status)
2269{
2270 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2271 struct pfvf_start_queue_resp_tlv *p_tlv;
Yuval Mintza044df82016-08-22 13:25:09 +03002272 bool b_legacy = false;
2273 u16 length;
Yuval Mintz5040acf2016-06-05 13:11:14 +03002274
2275 mbx->offset = (u8 *)mbx->reply_virt;
2276
Yuval Mintza044df82016-08-22 13:25:09 +03002277 /* Taking a bigger struct instead of adding a TLV to the list was a
2278 * mistake, but one which we're now stuck with, as some older
2279 * clients assume the size of the previous response.
2280 */
2281 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2282 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2283 b_legacy = true;
2284
2285 if (!b_legacy)
2286 length = sizeof(*p_tlv);
2287 else
2288 length = sizeof(struct pfvf_def_resp_tlv);
2289
Yuval Mintz5040acf2016-06-05 13:11:14 +03002290 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
Yuval Mintza044df82016-08-22 13:25:09 +03002291 length);
Yuval Mintz5040acf2016-06-05 13:11:14 +03002292 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2293 sizeof(struct channel_list_end_tlv));
2294
2295 /* Update the TLV with the response */
Yuval Mintza044df82016-08-22 13:25:09 +03002296 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
Yuval Mintz5040acf2016-06-05 13:11:14 +03002297 u16 qid = mbx->req_virt->start_txq.tx_qid;
2298
Ram Amrani51ff1722016-10-01 21:59:57 +03002299 p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid,
2300 DQ_DEMS_LEGACY);
Yuval Mintz5040acf2016-06-05 13:11:14 +03002301 }
2302
Yuval Mintza044df82016-08-22 13:25:09 +03002303 qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
Yuval Mintz5040acf2016-06-05 13:11:14 +03002304}
2305
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002306static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
2307 struct qed_ptt *p_ptt,
2308 struct qed_vf_info *vf)
2309{
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002310 struct qed_queue_start_common_params params;
2311 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
Yuval Mintz41086462016-06-05 13:11:13 +03002312 u8 status = PFVF_STATUS_NO_RESOURCE;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002313 struct vfpf_start_txq_tlv *req;
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002314 struct qed_vf_q_info *p_queue;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002315 int rc;
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002316 u16 pq;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002317
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002318 memset(&params, 0, sizeof(params));
2319 req = &mbx->req_virt->start_txq;
Yuval Mintz41086462016-06-05 13:11:13 +03002320
Mintz, Yuvalf109c242017-03-19 13:08:16 +02002321 if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2322 QED_IOV_VALIDATE_Q_DISABLE) ||
Yuval Mintz41086462016-06-05 13:11:13 +03002323 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2324 goto out;
2325
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002326 /* Acquire a new queue-cid */
2327 p_queue = &vf->vf_queues[req->tx_qid];
2328
2329 params.queue_id = p_queue->fw_tx_qid;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002330 params.vport_id = vf->vport_id;
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002331 params.stats_id = vf->abs_vf_id + 0x10;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002332 params.sb = req->hw_sb;
2333 params.sb_idx = req->sb_index;
2334
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002335 p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn,
2336 vf->opaque_fid,
2337 p_queue->fw_cid,
2338 req->tx_qid, &params);
2339 if (!p_queue->p_tx_cid)
2340 goto out;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002341
Ariel Eliorb5a9ee72017-04-03 12:21:09 +03002342 pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002343 rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
2344 req->pbl_addr, req->pbl_size, pq);
Yuval Mintz41086462016-06-05 13:11:13 +03002345 if (rc) {
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002346 status = PFVF_STATUS_FAILURE;
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002347 qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
2348 p_queue->p_tx_cid = NULL;
Yuval Mintz41086462016-06-05 13:11:13 +03002349 } else {
2350 status = PFVF_STATUS_SUCCESS;
Yuval Mintz41086462016-06-05 13:11:13 +03002351 }
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002352
Yuval Mintz41086462016-06-05 13:11:13 +03002353out:
Yuval Mintz5040acf2016-06-05 13:11:14 +03002354 qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002355}
2356
2357static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
2358 struct qed_vf_info *vf,
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002359 u16 rxq_id, bool cqe_completion)
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002360{
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002361 struct qed_vf_q_info *p_queue;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002362 int rc = 0;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002363
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002364 if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id,
2365 QED_IOV_VALIDATE_Q_ENABLE)) {
2366 DP_VERBOSE(p_hwfn,
2367 QED_MSG_IOV,
2368 "VF[%d] Tried Closing Rx 0x%04x which is inactive\n",
2369 vf->relative_vf_id, rxq_id);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002370 return -EINVAL;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002371 }
2372
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002373 p_queue = &vf->vf_queues[rxq_id];
2374
2375 rc = qed_eth_rx_queue_stop(p_hwfn,
2376 p_queue->p_rx_cid,
2377 false, cqe_completion);
2378 if (rc)
2379 return rc;
2380
2381 p_queue->p_rx_cid = NULL;
2382 vf->num_active_rxqs--;
2383
2384 return 0;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002385}
2386
2387static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002388 struct qed_vf_info *vf, u16 txq_id)
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002389{
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002390 struct qed_vf_q_info *p_queue;
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002391 int rc = 0;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002392
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002393 if (!qed_iov_validate_txq(p_hwfn, vf, txq_id,
2394 QED_IOV_VALIDATE_Q_ENABLE))
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002395 return -EINVAL;
2396
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002397 p_queue = &vf->vf_queues[txq_id];
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002398
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002399 rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
2400 if (rc)
2401 return rc;
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002402
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002403 p_queue->p_tx_cid = NULL;
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002404
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002405 return 0;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002406}
2407
2408static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
2409 struct qed_ptt *p_ptt,
2410 struct qed_vf_info *vf)
2411{
2412 u16 length = sizeof(struct pfvf_def_resp_tlv);
2413 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002414 u8 status = PFVF_STATUS_FAILURE;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002415 struct vfpf_stop_rxqs_tlv *req;
2416 int rc;
2417
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002418 /* There has never been an official driver that used this interface
2419 * for stopping multiple queues, and it is now considered deprecated.
2420 * Validate this isn't used here.
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002421 */
2422 req = &mbx->req_virt->stop_rxqs;
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002423 if (req->num_rxqs != 1) {
2424 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2425 "Odd; VF[%d] tried stopping multiple Rx queues\n",
2426 vf->relative_vf_id);
2427 status = PFVF_STATUS_NOT_SUPPORTED;
2428 goto out;
2429 }
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002430
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002431 rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2432 req->cqe_completion);
2433 if (!rc)
2434 status = PFVF_STATUS_SUCCESS;
2435out:
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002436 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2437 length, status);
2438}
2439
2440static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
2441 struct qed_ptt *p_ptt,
2442 struct qed_vf_info *vf)
2443{
2444 u16 length = sizeof(struct pfvf_def_resp_tlv);
2445 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002446 u8 status = PFVF_STATUS_FAILURE;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002447 struct vfpf_stop_txqs_tlv *req;
2448 int rc;
2449
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002450 /* There has never been an official driver that used this interface
2451 * for stopping multiple queues, and it is now considered deprecated.
2452 * Validate this isn't used here.
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002453 */
2454 req = &mbx->req_virt->stop_txqs;
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002455 if (req->num_txqs != 1) {
2456 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2457 "Odd; VF[%d] tried stopping multiple Tx queues\n",
2458 vf->relative_vf_id);
2459 status = PFVF_STATUS_NOT_SUPPORTED;
2460 goto out;
2461 }
2462 rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid);
2463 if (!rc)
2464 status = PFVF_STATUS_SUCCESS;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002465
Mintz, Yuval4c4fa792017-03-19 13:08:17 +02002466out:
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002467 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2468 length, status);
2469}
2470
Yuval Mintz17b235c2016-05-11 16:36:18 +03002471static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
2472 struct qed_ptt *p_ptt,
2473 struct qed_vf_info *vf)
2474{
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002475 struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
Yuval Mintz17b235c2016-05-11 16:36:18 +03002476 u16 length = sizeof(struct pfvf_def_resp_tlv);
2477 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2478 struct vfpf_update_rxq_tlv *req;
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002479 u8 status = PFVF_STATUS_FAILURE;
Yuval Mintz17b235c2016-05-11 16:36:18 +03002480 u8 complete_event_flg;
2481 u8 complete_cqe_flg;
2482 u16 qid;
2483 int rc;
2484 u8 i;
2485
2486 req = &mbx->req_virt->update_rxq;
2487 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2488 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2489
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002490 /* Validate inputs */
Mintz, Yuvalf109c242017-03-19 13:08:16 +02002491 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++)
2492 if (!qed_iov_validate_rxq(p_hwfn, vf, i,
2493 QED_IOV_VALIDATE_Q_ENABLE)) {
2494 DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2495 vf->relative_vf_id, req->rx_qid, req->num_rxqs);
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002496 goto out;
2497 }
2498
Mintz, Yuvalf109c242017-03-19 13:08:16 +02002499 /* Prepare the handlers */
2500 for (i = 0; i < req->num_rxqs; i++) {
2501 qid = req->rx_qid + i;
Mintz, Yuval3da7a372016-11-29 16:47:06 +02002502 handlers[i] = vf->vf_queues[qid].p_rx_cid;
2503 }
2504
2505 rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2506 req->num_rxqs,
2507 complete_cqe_flg,
2508 complete_event_flg,
2509 QED_SPQ_MODE_EBLOCK, NULL);
2510 if (rc)
2511 goto out;
2512
2513 status = PFVF_STATUS_SUCCESS;
2514out:
Yuval Mintz17b235c2016-05-11 16:36:18 +03002515 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2516 length, status);
2517}
2518
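/* Walk the TLV chain in the VF's request, returning the first TLV of the
 * requested type, or NULL if it is absent or the chain is malformed.
 */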
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002519void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
2520 void *p_tlvs_list, u16 req_type)
2521{
2522 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2523 int len = 0;
2524
2525 do {
2526 if (!p_tlv->length) {
2527 DP_NOTICE(p_hwfn, "Zero length TLV found\n");
2528 return NULL;
2529 }
2530
2531 if (p_tlv->type == req_type) {
2532 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2533 "Extended tlv type %d, length %d found\n",
2534 p_tlv->type, p_tlv->length);
2535 return p_tlv;
2536 }
2537
2538 len += p_tlv->length;
2539 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2540
2541 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2542 DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
2543 return NULL;
2544 }
2545 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
2546
2547 return NULL;
2548}
2549
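/* Each qed_iov_vp_update_*() helper below extracts one optional
 * vport-update TLV from the VF request, translates it into ramrod
 * parameters and marks the corresponding bit in tlvs_mask.
 */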
2550static void
2551qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
2552 struct qed_sp_vport_update_params *p_data,
2553 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2554{
2555 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2556 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2557
2558 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2559 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2560 if (!p_act_tlv)
2561 return;
2562
2563 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2564 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2565 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2566 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2567 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
2568}
2569
2570static void
Yuval Mintz17b235c2016-05-11 16:36:18 +03002571qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
2572 struct qed_sp_vport_update_params *p_data,
2573 struct qed_vf_info *p_vf,
2574 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2575{
2576 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2577 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2578
2579 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2580 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2581 if (!p_vlan_tlv)
2582 return;
2583
Yuval Mintz08feecd2016-05-11 16:36:20 +03002584 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2585
2586 /* Ignore the VF request if we're forcing a vlan */
Yuval Mintz1a635e42016-08-15 10:42:43 +03002587 if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
Yuval Mintz08feecd2016-05-11 16:36:20 +03002588 p_data->update_inner_vlan_removal_flg = 1;
2589 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2590 }
Yuval Mintz17b235c2016-05-11 16:36:18 +03002591
2592 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
2593}
2594
2595static void
2596qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
2597 struct qed_sp_vport_update_params *p_data,
2598 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2599{
2600 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2601 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2602
2603 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2604 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2605 tlv);
2606 if (!p_tx_switch_tlv)
2607 return;
2608
2609 p_data->update_tx_switching_flg = 1;
2610 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2611 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
2612}
2613
2614static void
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002615qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2616 struct qed_sp_vport_update_params *p_data,
2617 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2618{
2619 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2620 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2621
2622 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2623 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2624 if (!p_mcast_tlv)
2625 return;
2626
2627 p_data->update_approx_mcast_flg = 1;
2628 memcpy(p_data->bins, p_mcast_tlv->bins,
2629 sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2630 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2631}
2632
2633static void
2634qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
2635 struct qed_sp_vport_update_params *p_data,
2636 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2637{
2638 struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
2639 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2640 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2641
2642 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2643 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2644 if (!p_accept_tlv)
2645 return;
2646
2647 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2648 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2649 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2650 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2651 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
2652}
2653
2654static void
Yuval Mintz17b235c2016-05-11 16:36:18 +03002655qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
2656 struct qed_sp_vport_update_params *p_data,
2657 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2658{
2659 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2660 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2661
2662 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2663 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2664 tlv);
2665 if (!p_accept_any_vlan)
2666 return;
2667
2668 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2669 p_data->update_accept_any_vlan_flg =
2670 p_accept_any_vlan->update_accept_any_vlan_flg;
2671 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2672}
2673
2674static void
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002675qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
2676 struct qed_vf_info *vf,
2677 struct qed_sp_vport_update_params *p_data,
2678 struct qed_rss_params *p_rss,
Mintz, Yuvalf29ffdb2017-01-01 13:57:07 +02002679 struct qed_iov_vf_mbx *p_mbx,
2680 u16 *tlvs_mask, u16 *tlvs_accepted)
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002681{
2682 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2683 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
Mintz, Yuvalf29ffdb2017-01-01 13:57:07 +02002684 bool b_reject = false;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002685 u16 table_size;
Mintz, Yuvalf29ffdb2017-01-01 13:57:07 +02002686 u16 i, q_idx;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002687
2688 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2689 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2690 if (!p_rss_tlv) {
2691 p_data->rss_params = NULL;
2692 return;
2693 }
2694
2695 memset(p_rss, 0, sizeof(struct qed_rss_params));
2696
2697 p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
2698 VFPF_UPDATE_RSS_CONFIG_FLAG);
2699 p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
2700 VFPF_UPDATE_RSS_CAPS_FLAG);
2701 p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
2702 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2703 p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
2704 VFPF_UPDATE_RSS_KEY_FLAG);
2705
2706 p_rss->rss_enable = p_rss_tlv->rss_enable;
2707 p_rss->rss_eng_id = vf->relative_vf_id + 1;
2708 p_rss->rss_caps = p_rss_tlv->rss_caps;
2709 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002710 memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
2711
2712 table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
2713 (1 << p_rss_tlv->rss_table_size_log));
2714
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002715 for (i = 0; i < table_size; i++) {
Mintz, Yuvalf29ffdb2017-01-01 13:57:07 +02002716 q_idx = p_rss_tlv->rss_ind_table[i];
Mintz, Yuvalf109c242017-03-19 13:08:16 +02002717 if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
2718 QED_IOV_VALIDATE_Q_ENABLE)) {
Mintz, Yuvalf29ffdb2017-01-01 13:57:07 +02002719 DP_VERBOSE(p_hwfn,
2720 QED_MSG_IOV,
2721 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2722 vf->relative_vf_id, q_idx);
2723 b_reject = true;
2724 goto out;
2725 }
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002726
Mintz, Yuvalf29ffdb2017-01-01 13:57:07 +02002727 p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002728 }
2729
2730 p_data->rss_params = p_rss;
Mintz, Yuvalf29ffdb2017-01-01 13:57:07 +02002731out:
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002732 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
Mintz, Yuvalf29ffdb2017-01-01 13:57:07 +02002733 if (!b_reject)
2734 *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002735}
2736
Yuval Mintz17b235c2016-05-11 16:36:18 +03002737static void
2738qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
2739 struct qed_vf_info *vf,
2740 struct qed_sp_vport_update_params *p_data,
2741 struct qed_sge_tpa_params *p_sge_tpa,
2742 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2743{
2744 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2745 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2746
2747 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2748 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2749
2750 if (!p_sge_tpa_tlv) {
2751 p_data->sge_tpa_params = NULL;
2752 return;
2753 }
2754
2755 memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
2756
2757 p_sge_tpa->update_tpa_en_flg =
2758 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2759 p_sge_tpa->update_tpa_param_flg =
2760 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2761 VFPF_UPDATE_TPA_PARAM_FLAG);
2762
2763 p_sge_tpa->tpa_ipv4_en_flg =
2764 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2765 p_sge_tpa->tpa_ipv6_en_flg =
2766 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2767 p_sge_tpa->tpa_pkt_split_flg =
2768 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2769 p_sge_tpa->tpa_hdr_data_split_flg =
2770 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2771 p_sge_tpa->tpa_gro_consistent_flg =
2772 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2773
2774 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2775 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2776 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2777 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2778 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2779
2780 p_data->sge_tpa_params = p_sge_tpa;
2781
2782 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
2783}
2784
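/* Mask out accept-mode bits an untrusted VF is not allowed to set, while
 * remembering what it actually requested.
 */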
Mintz, Yuvalf990c822017-01-01 13:57:08 +02002785static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
2786 u8 vfid,
2787 struct qed_sp_vport_update_params *params,
2788 u16 *tlvs)
2789{
2790 u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
2791 struct qed_filter_accept_flags *flags = &params->accept_flags;
2792 struct qed_public_vf_info *vf_info;
2793
2794 /* Untrusted VFs can't even be trusted to know that fact.
2795 * Simply indicate everything is configured fine, and trace
2796 * configuration 'behind their back'.
2797 */
2798 if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
2799 return 0;
2800
2801 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
2802
2803 if (flags->update_rx_mode_config) {
2804 vf_info->rx_accept_mode = flags->rx_accept_filter;
2805 if (!vf_info->is_trusted_configured)
2806 flags->rx_accept_filter &= ~mask;
2807 }
2808
2809 if (flags->update_tx_mode_config) {
2810 vf_info->tx_accept_mode = flags->tx_accept_filter;
2811 if (!vf_info->is_trusted_configured)
2812 flags->tx_accept_filter &= ~mask;
2813 }
2814
2815 return 0;
2816}
2817
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002818static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
2819 struct qed_ptt *p_ptt,
2820 struct qed_vf_info *vf)
2821{
Mintz, Yuvalf29ffdb2017-01-01 13:57:07 +02002822 struct qed_rss_params *p_rss_params = NULL;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002823 struct qed_sp_vport_update_params params;
2824 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
Yuval Mintz17b235c2016-05-11 16:36:18 +03002825 struct qed_sge_tpa_params sge_tpa_params;
Mintz, Yuvalf29ffdb2017-01-01 13:57:07 +02002826 u16 tlvs_mask = 0, tlvs_accepted = 0;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002827 u8 status = PFVF_STATUS_SUCCESS;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002828 u16 length;
2829 int rc;
2830
Yuval Mintz41086462016-06-05 13:11:13 +03002831 /* Validate the VF can send such a request */
2832 if (!vf->vport_instance) {
2833 DP_VERBOSE(p_hwfn,
2834 QED_MSG_IOV,
2835 "No VPORT instance available for VF[%d], failing vport update\n",
2836 vf->abs_vf_id);
2837 status = PFVF_STATUS_FAILURE;
2838 goto out;
2839 }
Mintz, Yuvalf29ffdb2017-01-01 13:57:07 +02002840 p_rss_params = vzalloc(sizeof(*p_rss_params));
2841 if (!p_rss_params) {
2842 status = PFVF_STATUS_FAILURE;
2843 goto out;
2844 }
Yuval Mintz41086462016-06-05 13:11:13 +03002845
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002846 memset(&params, 0, sizeof(params));
2847 params.opaque_fid = vf->opaque_fid;
2848 params.vport_id = vf->vport_id;
2849 params.rss_params = NULL;
2850
2851 /* Search for extended tlvs list and update values
2852 * from VF in struct qed_sp_vport_update_params.
2853 */
2854 qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
Yuval Mintz17b235c2016-05-11 16:36:18 +03002855 qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
2856 qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002857 qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
2858 qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
Yuval Mintz17b235c2016-05-11 16:36:18 +03002859 qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
2860 qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
2861 &sge_tpa_params, mbx, &tlvs_mask);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002862
Mintz, Yuvalf29ffdb2017-01-01 13:57:07 +02002863 tlvs_accepted = tlvs_mask;
2864
2865 /* Some of the extended TLVs need to be validated first; In that case,
2866 * they can update the mask without updating the accepted [so that
2867 * PF could communicate to VF it has rejected request].
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002868 */
Mintz, Yuvalf29ffdb2017-01-01 13:57:07 +02002869 qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
2870 mbx, &tlvs_mask, &tlvs_accepted);
2871
Mintz, Yuvalf990c822017-01-01 13:57:08 +02002872 if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
2873 &params, &tlvs_accepted)) {
2874 tlvs_accepted = 0;
2875 status = PFVF_STATUS_NOT_SUPPORTED;
2876 goto out;
2877 }
2878
Mintz, Yuvalf29ffdb2017-01-01 13:57:07 +02002879 if (!tlvs_accepted) {
2880 if (tlvs_mask)
2881 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2882 "Upper-layer prevents VF vport configuration\n");
2883 else
2884 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2885 "No feature tlvs found for vport update\n");
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002886 status = PFVF_STATUS_NOT_SUPPORTED;
2887 goto out;
2888 }
2889
2890 rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
2891
2892 if (rc)
2893 status = PFVF_STATUS_FAILURE;
2894
2895out:
Mintz, Yuvalf29ffdb2017-01-01 13:57:07 +02002896 vfree(p_rss_params);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002897 length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
Mintz, Yuvalf29ffdb2017-01-01 13:57:07 +02002898 tlvs_mask, tlvs_accepted);
Yuval Mintzdacd88d2016-05-11 16:36:16 +03002899 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2900}
2901
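/* Track the VF's VLAN filter requests in its shadow configuration so they
 * can be re-applied after a forced-VLAN change.
 */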
Yuval Mintz8246d0b2016-06-05 13:11:15 +03002902static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
2903 struct qed_vf_info *p_vf,
2904 struct qed_filter_ucast *p_params)
Yuval Mintz08feecd2016-05-11 16:36:20 +03002905{
2906 int i;
2907
Yuval Mintz08feecd2016-05-11 16:36:20 +03002908 /* First remove entries and then add new ones */
2909 if (p_params->opcode == QED_FILTER_REMOVE) {
2910 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2911 if (p_vf->shadow_config.vlans[i].used &&
2912 p_vf->shadow_config.vlans[i].vid ==
2913 p_params->vlan) {
2914 p_vf->shadow_config.vlans[i].used = false;
2915 break;
2916 }
2917 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
2918 DP_VERBOSE(p_hwfn,
2919 QED_MSG_IOV,
2920 "VF [%d] - Tries to remove a non-existent vlan\n",
2921 p_vf->relative_vf_id);
2922 return -EINVAL;
2923 }
2924 } else if (p_params->opcode == QED_FILTER_REPLACE ||
2925 p_params->opcode == QED_FILTER_FLUSH) {
2926 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2927 p_vf->shadow_config.vlans[i].used = false;
2928 }
2929
2930 /* In forced mode, we're willing to remove entries - but we don't add
2931 * new ones.
2932 */
Yuval Mintz1a635e42016-08-15 10:42:43 +03002933 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
Yuval Mintz08feecd2016-05-11 16:36:20 +03002934 return 0;
2935
2936 if (p_params->opcode == QED_FILTER_ADD ||
2937 p_params->opcode == QED_FILTER_REPLACE) {
2938 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
2939 if (p_vf->shadow_config.vlans[i].used)
2940 continue;
2941
2942 p_vf->shadow_config.vlans[i].used = true;
2943 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
2944 break;
2945 }
2946
2947 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
2948 DP_VERBOSE(p_hwfn,
2949 QED_MSG_IOV,
2950 "VF [%d] - Tries to configure more than %d vlan filters\n",
2951 p_vf->relative_vf_id,
2952 QED_ETH_VF_NUM_VLAN_FILTERS + 1);
2953 return -EINVAL;
2954 }
2955 }
2956
2957 return 0;
2958}
2959
Yuval Mintz8246d0b2016-06-05 13:11:15 +03002960static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
2961 struct qed_vf_info *p_vf,
2962 struct qed_filter_ucast *p_params)
2963{
2964 int i;
2965
2966 /* If we're in forced-mode, we don't allow any change */
Yuval Mintz1a635e42016-08-15 10:42:43 +03002967 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
Yuval Mintz8246d0b2016-06-05 13:11:15 +03002968 return 0;
2969
2970 /* First remove entries and then add new ones */
2971 if (p_params->opcode == QED_FILTER_REMOVE) {
2972 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
2973 if (ether_addr_equal(p_vf->shadow_config.macs[i],
2974 p_params->mac)) {
Shyam Saini0ee28e32017-01-17 07:35:04 +05302975 eth_zero_addr(p_vf->shadow_config.macs[i]);
Yuval Mintz8246d0b2016-06-05 13:11:15 +03002976 break;
2977 }
2978 }
2979
2980 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
2981 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2982 "MAC isn't configured\n");
2983 return -EINVAL;
2984 }
2985 } else if (p_params->opcode == QED_FILTER_REPLACE ||
2986 p_params->opcode == QED_FILTER_FLUSH) {
2987 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
Shyam Saini0ee28e32017-01-17 07:35:04 +05302988 eth_zero_addr(p_vf->shadow_config.macs[i]);
Yuval Mintz8246d0b2016-06-05 13:11:15 +03002989 }
2990
2991 /* List the new MAC address */
2992 if (p_params->opcode != QED_FILTER_ADD &&
2993 p_params->opcode != QED_FILTER_REPLACE)
2994 return 0;
2995
2996 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
2997 if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
2998 ether_addr_copy(p_vf->shadow_config.macs[i],
2999 p_params->mac);
3000 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3001 "Added MAC at %d entry in shadow\n", i);
3002 break;
3003 }
3004 }
3005
3006 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
3007 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
3008 return -EINVAL;
3009 }
3010
3011 return 0;
3012}
3013
3014static int
3015qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
3016 struct qed_vf_info *p_vf,
3017 struct qed_filter_ucast *p_params)
3018{
3019 int rc = 0;
3020
3021 if (p_params->type == QED_FILTER_MAC) {
3022 rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3023 if (rc)
3024 return rc;
3025 }
3026
3027 if (p_params->type == QED_FILTER_VLAN)
3028 rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3029
3030 return rc;
3031}
3032
Baoyou Xieba569472016-09-09 09:21:15 +08003033static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
3034 int vfid, struct qed_filter_ucast *params)
Yuval Mintzdacd88d2016-05-11 16:36:16 +03003035{
3036 struct qed_public_vf_info *vf;
3037
3038 vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
3039 if (!vf)
3040 return -EINVAL;
3041
3042 /* No real decision to make; Store the configured MAC */
3043 if (params->type == QED_FILTER_MAC ||
3044 params->type == QED_FILTER_MAC_VLAN)
3045 ether_addr_copy(vf->mac, params->mac);
3046
3047 return 0;
3048}
3049
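/* qed_iov_vf_mbx_ucast_filter() - handle a VF's CHANNEL_TLV_UCAST_FILTER
 * request: verify the VF has an active vport, update the PF's shadow copy,
 * reject vlan/MAC changes that conflict with forced values published in the
 * bulletin, and finally configure the filter via qed_sp_eth_filter_ucast().
 * The VF is answered with SUCCESS, FORCED or FAILURE accordingly.
 */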
3050static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
3051 struct qed_ptt *p_ptt,
3052 struct qed_vf_info *vf)
3053{
Yuval Mintz08feecd2016-05-11 16:36:20 +03003054 struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03003055 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3056 struct vfpf_ucast_filter_tlv *req;
3057 u8 status = PFVF_STATUS_SUCCESS;
3058 struct qed_filter_ucast params;
3059 int rc;
3060
3061 /* Prepare the unicast filter params */
3062 memset(&params, 0, sizeof(struct qed_filter_ucast));
3063 req = &mbx->req_virt->ucast_filter;
3064 params.opcode = (enum qed_filter_opcode)req->opcode;
3065 params.type = (enum qed_filter_ucast_type)req->type;
3066
3067 params.is_rx_filter = 1;
3068 params.is_tx_filter = 1;
3069 params.vport_to_remove_from = vf->vport_id;
3070 params.vport_to_add_to = vf->vport_id;
3071 memcpy(params.mac, req->mac, ETH_ALEN);
3072 params.vlan = req->vlan;
3073
3074 DP_VERBOSE(p_hwfn,
3075 QED_MSG_IOV,
3076 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3077 vf->abs_vf_id, params.opcode, params.type,
3078 params.is_rx_filter ? "RX" : "",
3079 params.is_tx_filter ? "TX" : "",
3080 params.vport_to_add_to,
3081 params.mac[0], params.mac[1],
3082 params.mac[2], params.mac[3],
3083 params.mac[4], params.mac[5], params.vlan);
3084
3085 if (!vf->vport_instance) {
3086 DP_VERBOSE(p_hwfn,
3087 QED_MSG_IOV,
3088 "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
3089 vf->abs_vf_id);
3090 status = PFVF_STATUS_FAILURE;
3091 goto out;
3092 }
3093
Yuval Mintz08feecd2016-05-11 16:36:20 +03003094 /* Update shadow copy of the VF configuration */
3095 if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
3096 status = PFVF_STATUS_FAILURE;
3097 goto out;
3098 }
3099
3100	/* Determine if the unicast filtering is acceptable by PF */
Yuval Mintz1a635e42016-08-15 10:42:43 +03003101 if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
Yuval Mintz08feecd2016-05-11 16:36:20 +03003102 (params.type == QED_FILTER_VLAN ||
3103 params.type == QED_FILTER_MAC_VLAN)) {
3104 /* Once VLAN is forced or PVID is set, do not allow
3105 * to add/replace any further VLANs.
3106 */
3107 if (params.opcode == QED_FILTER_ADD ||
3108 params.opcode == QED_FILTER_REPLACE)
3109 status = PFVF_STATUS_FORCED;
3110 goto out;
3111 }
3112
Yuval Mintz1a635e42016-08-15 10:42:43 +03003113 if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
Yuval Mintzeff16962016-05-11 16:36:21 +03003114 (params.type == QED_FILTER_MAC ||
3115 params.type == QED_FILTER_MAC_VLAN)) {
3116 if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
3117 (params.opcode != QED_FILTER_ADD &&
3118 params.opcode != QED_FILTER_REPLACE))
3119 status = PFVF_STATUS_FORCED;
3120 goto out;
3121 }
3122
Yuval Mintzdacd88d2016-05-11 16:36:16 +03003123 rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
3124 if (rc) {
3125 status = PFVF_STATUS_FAILURE;
3126 goto out;
3127 }
3128
3129 rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3130 QED_SPQ_MODE_CB, NULL);
3131 if (rc)
3132 status = PFVF_STATUS_FAILURE;
3133
3134out:
3135 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3136 sizeof(struct pfvf_def_resp_tlv), status);
3137}
3138
Yuval Mintz0b55e272016-05-11 16:36:15 +03003139static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
3140 struct qed_ptt *p_ptt,
3141 struct qed_vf_info *vf)
3142{
3143 int i;
3144
3145 /* Reset the SBs */
3146 for (i = 0; i < vf->num_sbs; i++)
3147 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3148 vf->igu_sbs[i],
3149 vf->opaque_fid, false);
3150
3151 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3152 sizeof(struct pfvf_def_resp_tlv),
3153 PFVF_STATUS_SUCCESS);
3154}
3155
3156static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
3157 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
3158{
3159 u16 length = sizeof(struct pfvf_def_resp_tlv);
3160 u8 status = PFVF_STATUS_SUCCESS;
3161
3162 /* Disable Interrupts for VF */
3163 qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3164
3165 /* Reset Permission table */
3166 qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3167
3168 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3169 length, status);
3170}
3171
3172static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
3173 struct qed_ptt *p_ptt,
3174 struct qed_vf_info *p_vf)
3175{
3176 u16 length = sizeof(struct pfvf_def_resp_tlv);
Yuval Mintz1fe614d2016-06-05 13:11:11 +03003177 u8 status = PFVF_STATUS_SUCCESS;
3178 int rc = 0;
Yuval Mintz0b55e272016-05-11 16:36:15 +03003179
3180 qed_iov_vf_cleanup(p_hwfn, p_vf);
3181
Yuval Mintz1fe614d2016-06-05 13:11:11 +03003182 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3183 /* Stopping the VF */
3184 rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3185 p_vf->opaque_fid);
3186
3187 if (rc) {
3188 DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
3189 rc);
3190 status = PFVF_STATUS_FAILURE;
3191 }
3192
3193 p_vf->state = VF_STOPPED;
3194 }
3195
Yuval Mintz0b55e272016-05-11 16:36:15 +03003196 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
Yuval Mintz1fe614d2016-06-05 13:11:11 +03003197 length, status);
Yuval Mintz0b55e272016-05-11 16:36:15 +03003198}
3199
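/* Poll (while pretending to be the VF) until the DORQ usage counter of the
 * FLR-ed VF drops to zero, allowing up to ~1 second before returning -EBUSY.
 */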
3200static int
3201qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
3202 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3203{
3204 int cnt;
3205 u32 val;
3206
3207 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
3208
3209 for (cnt = 0; cnt < 50; cnt++) {
3210 val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3211 if (!val)
3212 break;
3213 msleep(20);
3214 }
3215 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
3216
3217 if (cnt == 50) {
3218 DP_ERR(p_hwfn,
3219 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3220 p_vf->abs_vf_id, val);
3221 return -EBUSY;
3222 }
3223
3224 return 0;
3225}
3226
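/* Sample the PBF per-VOQ producers/consumers and wait until the consumers
 * advance past the initially observed producers, i.e. until whatever the VF
 * still had in flight has drained.
 */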
3227static int
3228qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
3229 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3230{
3231 u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
3232 int i, cnt;
3233
3234 /* Read initial consumers & producers */
3235 for (i = 0; i < MAX_NUM_VOQS; i++) {
3236 u32 prod;
3237
3238 cons[i] = qed_rd(p_hwfn, p_ptt,
3239 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3240 i * 0x40);
3241 prod = qed_rd(p_hwfn, p_ptt,
3242 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3243 i * 0x40);
3244 distance[i] = prod - cons[i];
3245 }
3246
3247 /* Wait for consumers to pass the producers */
3248 i = 0;
3249 for (cnt = 0; cnt < 50; cnt++) {
3250 for (; i < MAX_NUM_VOQS; i++) {
3251 u32 tmp;
3252
3253 tmp = qed_rd(p_hwfn, p_ptt,
3254 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3255 i * 0x40);
3256 if (distance[i] > tmp - cons[i])
3257 break;
3258 }
3259
3260 if (i == MAX_NUM_VOQS)
3261 break;
3262
3263 msleep(20);
3264 }
3265
3266 if (cnt == 50) {
3267 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3268 p_vf->abs_vf_id, i);
3269 return -EBUSY;
3270 }
3271
3272 return 0;
3273}
3274
3275static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
3276 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3277{
3278 int rc;
3279
3280 rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3281 if (rc)
3282 return rc;
3283
3284 rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3285 if (rc)
3286 return rc;
3287
3288 return 0;
3289}
3290
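/* qed_iov_execute_vf_flr_cleanup() - per-VF FLR handling: clean the SW
 * state, wait for DORQ/PBF to drain, run the firmware's final cleanup,
 * re-mark the VF-PF channel as ready and re-enable VF access. On success
 * the VF is flagged for MFW acknowledgment in ack_vfs and its pending-FLR
 * bit is cleared.
 */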
3291static int
3292qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
3293 struct qed_ptt *p_ptt,
3294 u16 rel_vf_id, u32 *ack_vfs)
3295{
3296 struct qed_vf_info *p_vf;
3297 int rc = 0;
3298
3299 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3300 if (!p_vf)
3301 return 0;
3302
3303 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3304 (1ULL << (rel_vf_id % 64))) {
3305 u16 vfid = p_vf->abs_vf_id;
3306
3307 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3308 "VF[%d] - Handling FLR\n", vfid);
3309
3310 qed_iov_vf_cleanup(p_hwfn, p_vf);
3311
3312 /* If VF isn't active, no need for anything but SW */
3313 if (!p_vf->b_init)
3314 goto cleanup;
3315
3316 rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3317 if (rc)
3318 goto cleanup;
3319
3320 rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
3321 if (rc) {
3322			DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
3323 return rc;
3324 }
3325
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003326 /* Workaround to make VF-PF channel ready, as FW
3327 * doesn't do that as a part of FLR.
3328 */
3329 REG_WR(p_hwfn,
3330 GTT_BAR0_MAP_REG_USDM_RAM +
3331 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3332
Yuval Mintz0b55e272016-05-11 16:36:15 +03003333 /* VF_STOPPED has to be set only after final cleanup
3334 * but prior to re-enabling the VF.
3335 */
3336 p_vf->state = VF_STOPPED;
3337
3338 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3339 if (rc) {
3340			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
3341 vfid);
3342 return rc;
3343 }
3344cleanup:
3345 /* Mark VF for ack and clean pending state */
3346 if (p_vf->state == VF_RESET)
3347 p_vf->state = VF_STOPPED;
Yuval Mintz1a635e42016-08-15 10:42:43 +03003348 ack_vfs[vfid / 32] |= BIT((vfid % 32));
Yuval Mintz0b55e272016-05-11 16:36:15 +03003349 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3350 ~(1ULL << (rel_vf_id % 64));
Mintz, Yuvalfd3c6152017-02-27 11:06:32 +02003351 p_vf->vf_mbx.b_pending_msg = false;
Yuval Mintz0b55e272016-05-11 16:36:15 +03003352 }
3353
3354 return rc;
3355}
3356
Baoyou Xieba569472016-09-09 09:21:15 +08003357static int
3358qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
Yuval Mintz0b55e272016-05-11 16:36:15 +03003359{
3360 u32 ack_vfs[VF_MAX_STATIC / 32];
3361 int rc = 0;
3362 u16 i;
3363
3364 memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3365
3366 /* Since BRB <-> PRS interface can't be tested as part of the flr
3367 * polling due to HW limitations, simply sleep a bit. And since
3368 * there's no need to wait per-vf, do it before looping.
3369 */
3370 msleep(100);
3371
3372 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
3373 qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3374
3375 rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3376 return rc;
3377}
3378
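/* qed_iov_mark_vf_flr() - translate the MFW's disabled-VFs bitmap into
 * per-VF pending-FLR bits and move the affected VFs to VF_RESET.
 * Returns true if at least one VF was marked.
 */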
Mintz, Yuvalcccf6f52017-03-19 13:08:18 +02003379bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
Yuval Mintz0b55e272016-05-11 16:36:15 +03003380{
Mintz, Yuvalcccf6f52017-03-19 13:08:18 +02003381 bool found = false;
3382 u16 i;
Yuval Mintz0b55e272016-05-11 16:36:15 +03003383
3384 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
3385 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3386 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3387 "[%08x,...,%08x]: %08x\n",
3388 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3389
3390 if (!p_hwfn->cdev->p_iov_info) {
3391 DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
Mintz, Yuvalcccf6f52017-03-19 13:08:18 +02003392 return false;
Yuval Mintz0b55e272016-05-11 16:36:15 +03003393 }
3394
3395 /* Mark VFs */
3396 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
3397 struct qed_vf_info *p_vf;
3398 u8 vfid;
3399
3400 p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
3401 if (!p_vf)
3402 continue;
3403
3404 vfid = p_vf->abs_vf_id;
Yuval Mintz1a635e42016-08-15 10:42:43 +03003405 if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
Yuval Mintz0b55e272016-05-11 16:36:15 +03003406 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3407 u16 rel_vf_id = p_vf->relative_vf_id;
3408
3409 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3410 "VF[%d] [rel %d] got FLR-ed\n",
3411 vfid, rel_vf_id);
3412
3413 p_vf->state = VF_RESET;
3414
3415 /* No need to lock here, since pending_flr should
3416 * only change here and before ACKing MFw. Since
3417 * MFW will not trigger an additional attention for
3418 * VF flr until ACKs, we're safe.
3419 */
3420 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
Mintz, Yuvalcccf6f52017-03-19 13:08:18 +02003421 found = true;
Yuval Mintz0b55e272016-05-11 16:36:15 +03003422 }
3423 }
3424
3425 return found;
3426}
3427
Yuval Mintz73390ac2016-05-11 16:36:24 +03003428static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
3429 u16 vfid,
3430 struct qed_mcp_link_params *p_params,
3431 struct qed_mcp_link_state *p_link,
3432 struct qed_mcp_link_capabilities *p_caps)
3433{
3434 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
3435 vfid,
3436 false);
3437 struct qed_bulletin_content *p_bulletin;
3438
3439 if (!p_vf)
3440 return;
3441
3442 p_bulletin = p_vf->bulletin.p_virt;
3443
3444 if (p_params)
3445 __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
3446 if (p_link)
3447 __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
3448 if (p_caps)
3449 __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
3450}
3451
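/* qed_iov_process_mbx_req() - dispatch a pending mailbox message from a VF
 * to the matching TLV handler. Supported TLVs from a malicious VF are
 * answered with PFVF_STATUS_MALICIOUS; unknown TLVs are answered with
 * PFVF_STATUS_NOT_SUPPORTED, provided a valid reply address is known.
 */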
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003452static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
3453 struct qed_ptt *p_ptt, int vfid)
3454{
3455 struct qed_iov_vf_mbx *mbx;
3456 struct qed_vf_info *p_vf;
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003457
3458 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3459 if (!p_vf)
3460 return;
3461
3462 mbx = &p_vf->vf_mbx;
3463
3464 /* qed_iov_process_mbx_request */
Mintz, Yuvalfd3c6152017-02-27 11:06:32 +02003465 if (!mbx->b_pending_msg) {
3466 DP_NOTICE(p_hwfn,
3467 "VF[%02x]: Trying to process mailbox message when none is pending\n",
3468 p_vf->abs_vf_id);
3469 return;
3470 }
3471 mbx->b_pending_msg = false;
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003472
3473 mbx->first_tlv = mbx->req_virt->first_tlv;
3474
Mintz, Yuvalfd3c6152017-02-27 11:06:32 +02003475 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3476 "VF[%02x]: Processing mailbox message [type %04x]\n",
3477 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3478
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003479 /* check if tlv type is known */
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003480 if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
3481 !p_vf->b_malicious) {
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03003482 switch (mbx->first_tlv.tl.type) {
3483 case CHANNEL_TLV_ACQUIRE:
3484 qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
3485 break;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03003486 case CHANNEL_TLV_VPORT_START:
3487 qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3488 break;
3489 case CHANNEL_TLV_VPORT_TEARDOWN:
3490 qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3491 break;
3492 case CHANNEL_TLV_START_RXQ:
3493 qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3494 break;
3495 case CHANNEL_TLV_START_TXQ:
3496 qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3497 break;
3498 case CHANNEL_TLV_STOP_RXQS:
3499 qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3500 break;
3501 case CHANNEL_TLV_STOP_TXQS:
3502 qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3503 break;
Yuval Mintz17b235c2016-05-11 16:36:18 +03003504 case CHANNEL_TLV_UPDATE_RXQ:
3505 qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
3506 break;
Yuval Mintzdacd88d2016-05-11 16:36:16 +03003507 case CHANNEL_TLV_VPORT_UPDATE:
3508 qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3509 break;
3510 case CHANNEL_TLV_UCAST_FILTER:
3511 qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3512 break;
Yuval Mintz0b55e272016-05-11 16:36:15 +03003513 case CHANNEL_TLV_CLOSE:
3514 qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3515 break;
3516 case CHANNEL_TLV_INT_CLEANUP:
3517 qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3518 break;
3519 case CHANNEL_TLV_RELEASE:
3520 qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3521 break;
Chopra, Manisheaf3c0c2017-04-24 10:00:49 -07003522 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
3523 qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
3524 break;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03003525 }
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003526 } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3527 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3528 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3529 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3530
3531 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3532 mbx->first_tlv.tl.type,
3533 sizeof(struct pfvf_def_resp_tlv),
3534 PFVF_STATUS_MALICIOUS);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003535 } else {
3536 /* unknown TLV - this may belong to a VF driver from the future
3537 * - a version written after this PF driver was written, which
3538 * supports features unknown as of yet. Too bad since we don't
3539 * support them. Or this may be because someone wrote a crappy
3540 * VF driver and is sending garbage over the channel.
3541 */
Yuval Mintz54fdd802016-06-05 13:11:16 +03003542 DP_NOTICE(p_hwfn,
3543 "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
3544 p_vf->abs_vf_id,
3545 mbx->first_tlv.tl.type,
3546 mbx->first_tlv.tl.length,
3547 mbx->first_tlv.padding, mbx->first_tlv.reply_address);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003548
Yuval Mintz54fdd802016-06-05 13:11:16 +03003549 /* Try replying in case reply address matches the acquisition's
3550 * posted address.
3551 */
3552 if (p_vf->acquire.first_tlv.reply_address &&
3553 (mbx->first_tlv.reply_address ==
3554 p_vf->acquire.first_tlv.reply_address)) {
3555 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3556 mbx->first_tlv.tl.type,
3557 sizeof(struct pfvf_def_resp_tlv),
3558 PFVF_STATUS_NOT_SUPPORTED);
3559 } else {
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003560 DP_VERBOSE(p_hwfn,
3561 QED_MSG_IOV,
Yuval Mintz54fdd802016-06-05 13:11:16 +03003562 "VF[%02x]: Can't respond to TLV - no valid reply address\n",
3563 p_vf->abs_vf_id);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003564 }
3565 }
3566}
3567
Mintz, Yuvalfd3c6152017-02-27 11:06:32 +02003568void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003569{
Mintz, Yuvalfd3c6152017-02-27 11:06:32 +02003570 int i;
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003571
Mintz, Yuvalfd3c6152017-02-27 11:06:32 +02003572 memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003573
Mintz, Yuvalfd3c6152017-02-27 11:06:32 +02003574 qed_for_each_vf(p_hwfn, i) {
3575 struct qed_vf_info *p_vf;
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003576
Mintz, Yuvalfd3c6152017-02-27 11:06:32 +02003577 p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
3578 if (p_vf->vf_mbx.b_pending_msg)
3579 events[i / 64] |= 1ULL << (i % 64);
3580 }
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003581}
3582
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003583static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
3584 u16 abs_vfid)
3585{
3586 u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
3587
3588 if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
3589 DP_VERBOSE(p_hwfn,
3590 QED_MSG_IOV,
3591 "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
3592 abs_vfid);
3593 return NULL;
3594 }
3595
3596 return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
3597}
3598
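/* qed_sriov_vfpf_msg() - EQ-time handling of a VF-PF channel doorbell;
 * record the address of the VF's request, mark a message as pending and
 * schedule the IOV workqueue to process it outside EQ context.
 */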
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003599static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
3600 u16 abs_vfid, struct regpair *vf_msg)
3601{
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003602 struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003603 abs_vfid);
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003604
3605 if (!p_vf)
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003606 return 0;
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003607
3608 /* List the physical address of the request so that handler
3609 * could later on copy the message from it.
3610 */
3611 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
3612
3613 /* Mark the event and schedule the workqueue */
Mintz, Yuvalfd3c6152017-02-27 11:06:32 +02003614 p_vf->vf_mbx.b_pending_msg = true;
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003615 qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
3616
3617 return 0;
3618}
3619
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003620static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
3621 struct malicious_vf_eqe_data *p_data)
3622{
3623 struct qed_vf_info *p_vf;
3624
3625 p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
3626
3627 if (!p_vf)
3628 return;
3629
Mintz, Yuvale99a21c2017-03-19 13:08:19 +02003630 if (!p_vf->b_malicious) {
3631 DP_NOTICE(p_hwfn,
3632 "VF [%d] - Malicious behavior [%02x]\n",
3633 p_vf->abs_vf_id, p_data->err_id);
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003634
Mintz, Yuvale99a21c2017-03-19 13:08:19 +02003635 p_vf->b_malicious = true;
3636 } else {
3637 DP_INFO(p_hwfn,
3638 "VF [%d] - Malicious behavior [%02x]\n",
3639 p_vf->abs_vf_id, p_data->err_id);
3640 }
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003641}
3642
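/* qed_sriov_eqe_event() - SR-IOV branch of event-queue processing, mapping
 * COMMON_EVENT_VF_PF_CHANNEL and COMMON_EVENT_MALICIOUS_VF events to their
 * respective handlers.
 */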
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003643int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
3644 u8 opcode, __le16 echo, union event_ring_data *data)
3645{
3646 switch (opcode) {
3647 case COMMON_EVENT_VF_PF_CHANNEL:
3648 return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
3649 &data->vf_pf_channel.msg_addr);
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003650 case COMMON_EVENT_MALICIOUS_VF:
3651 qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
3652 return 0;
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003653 default:
3654 DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
3655 opcode);
3656 return -EINVAL;
3657 }
3658}
3659
Yuval Mintz32a47e72016-05-11 16:36:12 +03003660u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
3661{
3662 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
3663 u16 i;
3664
3665 if (!p_iov)
3666 goto out;
3667
3668 for (i = rel_vf_id; i < p_iov->total_vfs; i++)
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003669		if (qed_iov_is_valid_vfid(p_hwfn, i, true, false))
Yuval Mintz32a47e72016-05-11 16:36:12 +03003670 return i;
3671
3672out:
3673 return MAX_NUM_VFS;
3674}
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003675
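/* qed_iov_copy_vf_msg() - DMA the VF's request, whose address was recorded
 * by qed_sriov_vfpf_msg(), from VF memory into the PF's per-VF mailbox
 * buffer so it can be parsed safely.
 */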
3676static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
3677 int vfid)
3678{
3679 struct qed_dmae_params params;
3680 struct qed_vf_info *vf_info;
3681
3682 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3683 if (!vf_info)
3684 return -EINVAL;
3685
3686 memset(&params, 0, sizeof(struct qed_dmae_params));
3687 params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
3688 params.src_vfid = vf_info->abs_vf_id;
3689
3690 if (qed_dmae_host2host(p_hwfn, ptt,
3691 vf_info->vf_mbx.pending_req,
3692 vf_info->vf_mbx.req_phys,
3693 sizeof(union vfpf_tlvs) / 4, &params)) {
3694 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3695 "Failed to copy message from VF 0x%02x\n", vfid);
3696
3697 return -EIO;
3698 }
3699
3700 return 0;
3701}
3702
Yuval Mintzeff16962016-05-11 16:36:21 +03003703static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
3704 u8 *mac, int vfid)
3705{
3706 struct qed_vf_info *vf_info;
3707 u64 feature;
3708
3709 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3710 if (!vf_info) {
3711 DP_NOTICE(p_hwfn->cdev,
3712 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
3713 return;
3714 }
3715
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003716 if (vf_info->b_malicious) {
3717 DP_NOTICE(p_hwfn->cdev,
3718 "Can't set forced MAC to malicious VF [%d]\n", vfid);
3719 return;
3720 }
3721
Yuval Mintzeff16962016-05-11 16:36:21 +03003722 feature = 1 << MAC_ADDR_FORCED;
3723 memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
3724
3725 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3726 /* Forced MAC will disable MAC_ADDR */
Yuval Mintz1a635e42016-08-15 10:42:43 +03003727 vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
Yuval Mintzeff16962016-05-11 16:36:21 +03003728
3729 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3730}
3731
Baoyou Xieba569472016-09-09 09:21:15 +08003732static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
3733 u16 pvid, int vfid)
Yuval Mintz08feecd2016-05-11 16:36:20 +03003734{
3735 struct qed_vf_info *vf_info;
3736 u64 feature;
3737
3738 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3739 if (!vf_info) {
3740 DP_NOTICE(p_hwfn->cdev,
3741 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
3742 return;
3743 }
3744
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003745 if (vf_info->b_malicious) {
3746 DP_NOTICE(p_hwfn->cdev,
3747 "Can't set forced vlan to malicious VF [%d]\n", vfid);
3748 return;
3749 }
3750
Yuval Mintz08feecd2016-05-11 16:36:20 +03003751 feature = 1 << VLAN_ADDR_FORCED;
3752 vf_info->bulletin.p_virt->pvid = pvid;
3753 if (pvid)
3754 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3755 else
3756 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
3757
3758 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3759}
3760
Chopra, Manish97379f12017-04-24 10:00:48 -07003761void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
3762 int vfid, u16 vxlan_port, u16 geneve_port)
3763{
3764 struct qed_vf_info *vf_info;
3765
3766 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3767 if (!vf_info) {
3768 DP_NOTICE(p_hwfn->cdev,
3769 "Can not set udp ports, invalid vfid [%d]\n", vfid);
3770 return;
3771 }
3772
3773 if (vf_info->b_malicious) {
3774 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3775 "Can not set udp ports to malicious VF [%d]\n",
3776 vfid);
3777 return;
3778 }
3779
3780 vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
3781 vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
3782}
3783
Yuval Mintz6ddc7602016-05-11 16:36:23 +03003784static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
3785{
3786 struct qed_vf_info *p_vf_info;
3787
3788 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3789 if (!p_vf_info)
3790 return false;
3791
3792 return !!p_vf_info->vport_instance;
3793}
3794
Baoyou Xieba569472016-09-09 09:21:15 +08003795static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
Yuval Mintz0b55e272016-05-11 16:36:15 +03003796{
3797 struct qed_vf_info *p_vf_info;
3798
3799 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3800 if (!p_vf_info)
3801 return true;
3802
3803 return p_vf_info->state == VF_STOPPED;
3804}
3805
Yuval Mintz73390ac2016-05-11 16:36:24 +03003806static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
3807{
3808 struct qed_vf_info *vf_info;
3809
3810 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3811 if (!vf_info)
3812 return false;
3813
3814 return vf_info->spoof_chk;
3815}
3816
Baoyou Xieba569472016-09-09 09:21:15 +08003817static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
Yuval Mintz6ddc7602016-05-11 16:36:23 +03003818{
3819 struct qed_vf_info *vf;
3820 int rc = -EINVAL;
3821
3822 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3823 DP_NOTICE(p_hwfn,
3824 "SR-IOV sanity check failed, can't set spoofchk\n");
3825 goto out;
3826 }
3827
3828 vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3829 if (!vf)
3830 goto out;
3831
3832 if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
3833 /* After VF VPORT start PF will configure spoof check */
3834 vf->req_spoofchk_val = val;
3835 rc = 0;
3836 goto out;
3837 }
3838
3839 rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
3840
3841out:
3842 return rc;
3843}
3844
Yuval Mintzeff16962016-05-11 16:36:21 +03003845static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
3846 u16 rel_vf_id)
3847{
3848 struct qed_vf_info *p_vf;
3849
3850 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3851 if (!p_vf || !p_vf->bulletin.p_virt)
3852 return NULL;
3853
Yuval Mintz1a635e42016-08-15 10:42:43 +03003854 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
Yuval Mintzeff16962016-05-11 16:36:21 +03003855 return NULL;
3856
3857 return p_vf->bulletin.p_virt->mac;
3858}
3859
Baoyou Xieba569472016-09-09 09:21:15 +08003860static u16
3861qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
Yuval Mintz08feecd2016-05-11 16:36:20 +03003862{
3863 struct qed_vf_info *p_vf;
3864
3865 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3866 if (!p_vf || !p_vf->bulletin.p_virt)
3867 return 0;
3868
Yuval Mintz1a635e42016-08-15 10:42:43 +03003869 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
Yuval Mintz08feecd2016-05-11 16:36:20 +03003870 return 0;
3871
3872 return p_vf->bulletin.p_virt->pvid;
3873}
3874
Yuval Mintz733def62016-05-11 16:36:22 +03003875static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
3876 struct qed_ptt *p_ptt, int vfid, int val)
3877{
3878 struct qed_vf_info *vf;
3879 u8 abs_vp_id = 0;
3880 int rc;
3881
3882 vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3883 if (!vf)
3884 return -EINVAL;
3885
3886 rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
3887 if (rc)
3888 return rc;
3889
3890 return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
3891}
3892
Baoyou Xieba569472016-09-09 09:21:15 +08003893static int
3894qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
Yuval Mintz733def62016-05-11 16:36:22 +03003895{
3896 struct qed_vf_info *vf;
3897 u8 vport_id;
3898 int i;
3899
3900 for_each_hwfn(cdev, i) {
3901 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3902
3903 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3904 DP_NOTICE(p_hwfn,
3905 "SR-IOV sanity check failed, can't set min rate\n");
3906 return -EINVAL;
3907 }
3908 }
3909
3910 vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
3911 vport_id = vf->vport_id;
3912
3913 return qed_configure_vport_wfq(cdev, vport_id, rate);
3914}
3915
Yuval Mintz73390ac2016-05-11 16:36:24 +03003916static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
3917{
3918 struct qed_wfq_data *vf_vp_wfq;
3919 struct qed_vf_info *vf_info;
3920
3921 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3922 if (!vf_info)
3923 return 0;
3924
3925 vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
3926
3927 if (vf_vp_wfq->configured)
3928 return vf_vp_wfq->min_speed;
3929 else
3930 return 0;
3931}
3932
Yuval Mintz37bff2b2016-05-11 16:36:13 +03003933/**
3934 * qed_schedule_iov - schedules IOV task for VF and PF
3935 * @hwfn: hardware function pointer
3936 * @flag: IOV flag for VF/PF
3937 */
3938void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
3939{
3940 smp_mb__before_atomic();
3941 set_bit(flag, &hwfn->iov_task_flags);
3942 smp_mb__after_atomic();
3943 DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
3944 queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
3945}
3946
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03003947void qed_vf_start_iov_wq(struct qed_dev *cdev)
3948{
3949 int i;
3950
3951 for_each_hwfn(cdev, i)
3952 queue_delayed_work(cdev->hwfns[i].iov_wq,
3953 &cdev->hwfns[i].iov_task, 0);
3954}
3955
Yuval Mintz0b55e272016-05-11 16:36:15 +03003956int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
3957{
3958 int i, j;
3959
3960 for_each_hwfn(cdev, i)
3961 if (cdev->hwfns[i].iov_wq)
3962 flush_workqueue(cdev->hwfns[i].iov_wq);
3963
3964 /* Mark VFs for disablement */
3965 qed_iov_set_vfs_to_disable(cdev, true);
3966
3967 if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
3968 pci_disable_sriov(cdev->pdev);
3969
3970 for_each_hwfn(cdev, i) {
3971 struct qed_hwfn *hwfn = &cdev->hwfns[i];
3972 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
3973
3974 /* Failure to acquire the ptt in 100g creates an odd error
3975		 * where the first engine has already released IOV.
3976 */
3977 if (!ptt) {
3978 DP_ERR(hwfn, "Failed to acquire ptt\n");
3979 return -EBUSY;
3980 }
3981
Yuval Mintz733def62016-05-11 16:36:22 +03003982 /* Clean WFQ db and configure equal weight for all vports */
3983 qed_clean_wfq_db(hwfn, ptt);
3984
Yuval Mintz0b55e272016-05-11 16:36:15 +03003985 qed_for_each_vf(hwfn, j) {
3986 int k;
3987
Yuval Mintz7eff82b2016-10-14 05:19:22 -04003988 if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
Yuval Mintz0b55e272016-05-11 16:36:15 +03003989 continue;
3990
3991 /* Wait until VF is disabled before releasing */
3992 for (k = 0; k < 100; k++) {
3993 if (!qed_iov_is_vf_stopped(hwfn, j))
3994 msleep(20);
3995 else
3996 break;
3997 }
3998
3999 if (k < 100)
4000 qed_iov_release_hw_for_vf(&cdev->hwfns[i],
4001 ptt, j);
4002 else
4003 DP_ERR(hwfn,
4004 "Timeout waiting for VF's FLR to end\n");
4005 }
4006
4007 qed_ptt_release(hwfn, ptt);
4008 }
4009
4010 qed_iov_set_vfs_to_disable(cdev, false);
4011
4012 return 0;
4013}
4014
Mintz, Yuval3da7a372016-11-29 16:47:06 +02004015static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
4016 u16 vfid,
4017 struct qed_iov_vf_init_params *params)
4018{
4019 u16 base, i;
4020
4021 /* Since we have an equal resource distribution per-VF, and we assume
4022 * PF has acquired the QED_PF_L2_QUE first queues, we start setting
4023 * sequentially from there.
4024 */
4025 base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;
4026
4027 params->rel_vf_id = vfid;
4028 for (i = 0; i < params->num_queues; i++) {
4029 params->req_rx_queue[i] = base + i;
4030 params->req_tx_queue[i] = base + i;
4031 }
4032}
4033
Yuval Mintz0b55e272016-05-11 16:36:15 +03004034static int qed_sriov_enable(struct qed_dev *cdev, int num)
4035{
Mintz, Yuval3da7a372016-11-29 16:47:06 +02004036 struct qed_iov_vf_init_params params;
Yuval Mintz0b55e272016-05-11 16:36:15 +03004037 int i, j, rc;
4038
4039 if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
4040 DP_NOTICE(cdev, "Can start at most %d VFs\n",
4041 RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
4042 return -EINVAL;
4043 }
4044
Mintz, Yuval3da7a372016-11-29 16:47:06 +02004045 memset(&params, 0, sizeof(params));
4046
Yuval Mintz0b55e272016-05-11 16:36:15 +03004047 /* Initialize HW for VF access */
4048 for_each_hwfn(cdev, j) {
4049 struct qed_hwfn *hwfn = &cdev->hwfns[j];
4050 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
Mintz, Yuval5a1f9652016-10-31 07:14:26 +02004051
4052 /* Make sure not to use more than 16 queues per VF */
Mintz, Yuval3da7a372016-11-29 16:47:06 +02004053 params.num_queues = min_t(int,
4054 FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
4055 16);
Yuval Mintz0b55e272016-05-11 16:36:15 +03004056
4057 if (!ptt) {
4058 DP_ERR(hwfn, "Failed to acquire ptt\n");
4059 rc = -EBUSY;
4060 goto err;
4061 }
4062
Yuval Mintz0b55e272016-05-11 16:36:15 +03004063 for (i = 0; i < num; i++) {
Yuval Mintz7eff82b2016-10-14 05:19:22 -04004064 if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
Yuval Mintz0b55e272016-05-11 16:36:15 +03004065 continue;
4066
Mintz, Yuval3da7a372016-11-29 16:47:06 +02004067 qed_sriov_enable_qid_config(hwfn, i, &params);
4068 rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
Yuval Mintz0b55e272016-05-11 16:36:15 +03004069 if (rc) {
4070 DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
4071 qed_ptt_release(hwfn, ptt);
4072 goto err;
4073 }
4074 }
4075
4076 qed_ptt_release(hwfn, ptt);
4077 }
4078
4079 /* Enable SRIOV PCIe functions */
4080 rc = pci_enable_sriov(cdev->pdev, num);
4081 if (rc) {
4082 DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
4083 goto err;
4084 }
4085
4086 return num;
4087
4088err:
4089 qed_sriov_disable(cdev, false);
4090 return rc;
4091}
4092
4093static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
4094{
4095 if (!IS_QED_SRIOV(cdev)) {
4096 DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
4097 return -EOPNOTSUPP;
4098 }
4099
4100 if (num_vfs_param)
4101 return qed_sriov_enable(cdev, num_vfs_param);
4102 else
4103 return qed_sriov_disable(cdev, true);
4104}
4105
Yuval Mintzeff16962016-05-11 16:36:21 +03004106static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
4107{
4108 int i;
4109
4110 if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
4111 DP_VERBOSE(cdev, QED_MSG_IOV,
4112 "Cannot set a VF MAC; Sriov is not enabled\n");
4113 return -EINVAL;
4114 }
4115
Yuval Mintz7eff82b2016-10-14 05:19:22 -04004116 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
Yuval Mintzeff16962016-05-11 16:36:21 +03004117 DP_VERBOSE(cdev, QED_MSG_IOV,
4118 "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
4119 return -EINVAL;
4120 }
4121
4122 for_each_hwfn(cdev, i) {
4123 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4124 struct qed_public_vf_info *vf_info;
4125
4126 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
4127 if (!vf_info)
4128 continue;
4129
4130 /* Set the forced MAC, and schedule the IOV task */
4131 ether_addr_copy(vf_info->forced_mac, mac);
4132 qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
4133 }
4134
4135 return 0;
4136}
4137
Yuval Mintz08feecd2016-05-11 16:36:20 +03004138static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
4139{
4140 int i;
4141
4142 if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
4143 DP_VERBOSE(cdev, QED_MSG_IOV,
4144 "Cannot set a VF MAC; Sriov is not enabled\n");
4145 return -EINVAL;
4146 }
4147
Yuval Mintz7eff82b2016-10-14 05:19:22 -04004148 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
Yuval Mintz08feecd2016-05-11 16:36:20 +03004149 DP_VERBOSE(cdev, QED_MSG_IOV,
4150 "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
4151 return -EINVAL;
4152 }
4153
4154 for_each_hwfn(cdev, i) {
4155 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4156 struct qed_public_vf_info *vf_info;
4157
4158 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
4159 if (!vf_info)
4160 continue;
4161
4162 /* Set the forced vlan, and schedule the IOV task */
4163 vf_info->forced_vlan = vid;
4164 qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
4165 }
4166
4167 return 0;
4168}
4169
Yuval Mintz73390ac2016-05-11 16:36:24 +03004170static int qed_get_vf_config(struct qed_dev *cdev,
4171 int vf_id, struct ifla_vf_info *ivi)
4172{
4173 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
4174 struct qed_public_vf_info *vf_info;
4175 struct qed_mcp_link_state link;
4176 u32 tx_rate;
4177
4178 /* Sanitize request */
4179 if (IS_VF(cdev))
4180 return -EINVAL;
4181
Yuval Mintz7eff82b2016-10-14 05:19:22 -04004182 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
Yuval Mintz73390ac2016-05-11 16:36:24 +03004183 DP_VERBOSE(cdev, QED_MSG_IOV,
4184 "VF index [%d] isn't active\n", vf_id);
4185 return -EINVAL;
4186 }
4187
4188 vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
4189
4190 qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
4191
4192 /* Fill information about VF */
4193 ivi->vf = vf_id;
4194
4195 if (is_valid_ether_addr(vf_info->forced_mac))
4196 ether_addr_copy(ivi->mac, vf_info->forced_mac);
4197 else
4198 ether_addr_copy(ivi->mac, vf_info->mac);
4199
4200 ivi->vlan = vf_info->forced_vlan;
4201 ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
4202 ivi->linkstate = vf_info->link_state;
4203 tx_rate = vf_info->tx_rate;
4204 ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
4205 ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
4206
4207 return 0;
4208}
4209
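/* qed_inform_vf_link_state() - refresh the link section of every VF's
 * bulletin from the leading hwfn's MFW link data, overriding it according
 * to the VF's administratively configured link state and tx rate, and
 * schedule a bulletin update.
 */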
Yuval Mintz36558c32016-05-11 16:36:17 +03004210void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
4211{
Mintz, Yuvale50728e2017-03-19 13:08:20 +02004212 struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
Yuval Mintz36558c32016-05-11 16:36:17 +03004213 struct qed_mcp_link_capabilities caps;
4214 struct qed_mcp_link_params params;
4215 struct qed_mcp_link_state link;
4216 int i;
4217
4218 if (!hwfn->pf_iov_info)
4219 return;
4220
4221 /* Update bulletin of all future possible VFs with link configuration */
4222 for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
Yuval Mintz733def62016-05-11 16:36:22 +03004223 struct qed_public_vf_info *vf_info;
4224
4225 vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
4226 if (!vf_info)
4227 continue;
4228
Mintz, Yuvale50728e2017-03-19 13:08:20 +02004229 /* Only hwfn0 is actually interested in the link speed.
4230 * But since only it would receive an MFW indication of link,
4231 * need to take configuration from it - otherwise things like
4232 * rate limiting for hwfn1 VF would not work.
4233 */
4234 memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
4235 sizeof(params));
4236 memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
4237 memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
Yuval Mintz36558c32016-05-11 16:36:17 +03004238 sizeof(caps));
4239
Yuval Mintz733def62016-05-11 16:36:22 +03004240 /* Modify link according to the VF's configured link state */
4241 switch (vf_info->link_state) {
4242 case IFLA_VF_LINK_STATE_DISABLE:
4243 link.link_up = false;
4244 break;
4245 case IFLA_VF_LINK_STATE_ENABLE:
4246 link.link_up = true;
4247 /* Set speed according to maximum supported by HW.
4248 * that is 40G for regular devices and 100G for CMT
4249 * mode devices.
4250 */
4251 link.speed = (hwfn->cdev->num_hwfns > 1) ?
4252 100000 : 40000;
4253 default:
4254 /* In auto mode pass PF link image to VF */
4255 break;
4256 }
4257
4258 if (link.link_up && vf_info->tx_rate) {
4259 struct qed_ptt *ptt;
4260 int rate;
4261
4262 rate = min_t(int, vf_info->tx_rate, link.speed);
4263
4264 ptt = qed_ptt_acquire(hwfn);
4265 if (!ptt) {
4266 DP_NOTICE(hwfn, "Failed to acquire PTT\n");
4267 return;
4268 }
4269
4270 if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
4271 vf_info->tx_rate = rate;
4272 link.speed = rate;
4273 }
4274
4275 qed_ptt_release(hwfn, ptt);
4276 }
4277
Yuval Mintz36558c32016-05-11 16:36:17 +03004278 qed_iov_set_link(hwfn, i, &params, &link, &caps);
4279 }
4280
4281 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
4282}
4283
Yuval Mintz733def62016-05-11 16:36:22 +03004284static int qed_set_vf_link_state(struct qed_dev *cdev,
4285 int vf_id, int link_state)
4286{
4287 int i;
4288
4289 /* Sanitize request */
4290 if (IS_VF(cdev))
4291 return -EINVAL;
4292
Yuval Mintz7eff82b2016-10-14 05:19:22 -04004293 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
Yuval Mintz733def62016-05-11 16:36:22 +03004294 DP_VERBOSE(cdev, QED_MSG_IOV,
4295 "VF index [%d] isn't active\n", vf_id);
4296 return -EINVAL;
4297 }
4298
4299 /* Handle configuration of link state */
4300 for_each_hwfn(cdev, i) {
4301 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4302 struct qed_public_vf_info *vf;
4303
4304 vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
4305 if (!vf)
4306 continue;
4307
4308 if (vf->link_state == link_state)
4309 continue;
4310
4311 vf->link_state = link_state;
4312 qed_inform_vf_link_state(&cdev->hwfns[i]);
4313 }
4314
4315 return 0;
4316}
4317
Yuval Mintz6ddc7602016-05-11 16:36:23 +03004318static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
4319{
4320 int i, rc = -EINVAL;
4321
4322 for_each_hwfn(cdev, i) {
4323 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4324
4325 rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
4326 if (rc)
4327 break;
4328 }
4329
4330 return rc;
4331}
4332
Yuval Mintz733def62016-05-11 16:36:22 +03004333static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
4334{
4335 int i;
4336
4337 for_each_hwfn(cdev, i) {
4338 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4339 struct qed_public_vf_info *vf;
4340
4341 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4342 DP_NOTICE(p_hwfn,
4343 "SR-IOV sanity check failed, can't set tx rate\n");
4344 return -EINVAL;
4345 }
4346
4347 vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
4348
4349 vf->tx_rate = rate;
4350
4351 qed_inform_vf_link_state(p_hwfn);
4352 }
4353
4354 return 0;
4355}
4356
4357static int qed_set_vf_rate(struct qed_dev *cdev,
4358 int vfid, u32 min_rate, u32 max_rate)
4359{
4360 int rc_min = 0, rc_max = 0;
4361
4362 if (max_rate)
4363 rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
4364
4365 if (min_rate)
4366 rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
4367
4368 if (rc_max | rc_min)
4369 return -EINVAL;
4370
4371 return 0;
4372}
4373
Mintz, Yuvalf990c822017-01-01 13:57:08 +02004374static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
4375{
4376 int i;
4377
4378 for_each_hwfn(cdev, i) {
4379 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4380 struct qed_public_vf_info *vf;
4381
4382 if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
4383 DP_NOTICE(hwfn,
4384 "SR-IOV sanity check failed, can't set trust\n");
4385 return -EINVAL;
4386 }
4387
4388 vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
4389
4390 if (vf->is_trusted_request == trust)
4391 return 0;
4392 vf->is_trusted_request = trust;
4393
4394 qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
4395 }
4396
4397 return 0;
4398}
4399
Yuval Mintz37bff2b2016-05-11 16:36:13 +03004400static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
4401{
4402 u64 events[QED_VF_ARRAY_LENGTH];
4403 struct qed_ptt *ptt;
4404 int i;
4405
4406 ptt = qed_ptt_acquire(hwfn);
4407 if (!ptt) {
4408 DP_VERBOSE(hwfn, QED_MSG_IOV,
4409 "Can't acquire PTT; re-scheduling\n");
4410 qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
4411 return;
4412 }
4413
Mintz, Yuvalfd3c6152017-02-27 11:06:32 +02004414 qed_iov_pf_get_pending_events(hwfn, events);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03004415
4416 DP_VERBOSE(hwfn, QED_MSG_IOV,
4417 "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
4418 events[0], events[1], events[2]);
4419
4420 qed_for_each_vf(hwfn, i) {
4421 /* Skip VFs with no pending messages */
4422 if (!(events[i / 64] & (1ULL << (i % 64))))
4423 continue;
4424
4425 DP_VERBOSE(hwfn, QED_MSG_IOV,
4426 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
4427 i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4428
4429 /* Copy VF's message to PF's request buffer for that VF */
4430 if (qed_iov_copy_vf_msg(hwfn, ptt, i))
4431 continue;
4432
4433 qed_iov_process_mbx_req(hwfn, ptt, i);
4434 }
4435
4436 qed_ptt_release(hwfn, ptt);
4437}
4438
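/* qed_handle_pf_set_vf_unicast() - propagate forced MAC/vlan values
 * requested through qed_sriov_pf_set_mac()/qed_sriov_pf_set_vlan() into
 * each VF's bulletin board, and schedule a bulletin post if anything
 * changed.
 */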
Yuval Mintz08feecd2016-05-11 16:36:20 +03004439static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
4440{
4441 int i;
4442
4443 qed_for_each_vf(hwfn, i) {
4444 struct qed_public_vf_info *info;
4445 bool update = false;
Yuval Mintzeff16962016-05-11 16:36:21 +03004446 u8 *mac;
Yuval Mintz08feecd2016-05-11 16:36:20 +03004447
4448 info = qed_iov_get_public_vf_info(hwfn, i, true);
4449 if (!info)
4450 continue;
4451
4452 /* Update data on bulletin board */
Yuval Mintzeff16962016-05-11 16:36:21 +03004453 mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
4454 if (is_valid_ether_addr(info->forced_mac) &&
4455 (!mac || !ether_addr_equal(mac, info->forced_mac))) {
4456 DP_VERBOSE(hwfn,
4457 QED_MSG_IOV,
4458 "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
4459 i,
4460 hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4461
4462 /* Update bulletin board with forced MAC */
4463 qed_iov_bulletin_set_forced_mac(hwfn,
4464 info->forced_mac, i);
4465 update = true;
4466 }
Yuval Mintz08feecd2016-05-11 16:36:20 +03004467
4468 if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
4469 info->forced_vlan) {
4470 DP_VERBOSE(hwfn,
4471 QED_MSG_IOV,
4472 "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
4473 info->forced_vlan,
4474 i,
4475 hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4476 qed_iov_bulletin_set_forced_vlan(hwfn,
4477 info->forced_vlan, i);
4478 update = true;
4479 }
4480
4481 if (update)
4482 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
4483 }
4484}
4485
Yuval Mintz36558c32016-05-11 16:36:17 +03004486static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
4487{
4488 struct qed_ptt *ptt;
4489 int i;
4490
4491 ptt = qed_ptt_acquire(hwfn);
4492 if (!ptt) {
4493 DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
4494 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
4495 return;
4496 }
4497
4498 qed_for_each_vf(hwfn, i)
4499 qed_iov_post_vf_bulletin(hwfn, i, ptt);
4500
4501 qed_ptt_release(hwfn, ptt);
4502}
4503
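/* qed_iov_handle_trust_change() - apply pending trust changes per VF by
 * updating the vport's Rx/Tx accept flags: an untrusted VF loses the
 * "accept unmatched unicast/multicast" bits, while a trusted VF keeps
 * whatever accept mode it requested.
 */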
Mintz, Yuvalf990c822017-01-01 13:57:08 +02004504static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
4505{
4506 struct qed_sp_vport_update_params params;
4507 struct qed_filter_accept_flags *flags;
4508 struct qed_public_vf_info *vf_info;
4509 struct qed_vf_info *vf;
4510 u8 mask;
4511 int i;
4512
4513 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
4514 flags = &params.accept_flags;
4515
4516 qed_for_each_vf(hwfn, i) {
4517 /* Need to make sure current requested configuration didn't
4518 * flip so that we'll end up configuring something that's not
4519 * needed.
4520 */
4521 vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
4522 if (vf_info->is_trusted_configured ==
4523 vf_info->is_trusted_request)
4524 continue;
4525 vf_info->is_trusted_configured = vf_info->is_trusted_request;
4526
4527 /* Validate that the VF has a configured vport */
4528 vf = qed_iov_get_vf_info(hwfn, i, true);
4529 if (!vf->vport_instance)
4530 continue;
4531
4532 memset(&params, 0, sizeof(params));
4533 params.opaque_fid = vf->opaque_fid;
4534 params.vport_id = vf->vport_id;
4535
4536 if (vf_info->rx_accept_mode & mask) {
4537 flags->update_rx_mode_config = 1;
4538 flags->rx_accept_filter = vf_info->rx_accept_mode;
4539 }
4540
4541 if (vf_info->tx_accept_mode & mask) {
4542 flags->update_tx_mode_config = 1;
4543 flags->tx_accept_filter = vf_info->tx_accept_mode;
4544 }
4545
4546 /* Remove if needed; Otherwise this would set the mask */
4547 if (!vf_info->is_trusted_configured) {
4548 flags->rx_accept_filter &= ~mask;
4549 flags->tx_accept_filter &= ~mask;
4550 }
4551
4552 if (flags->update_rx_mode_config ||
4553 flags->update_tx_mode_config)
4554 qed_sp_vport_update(hwfn, &params,
4555 QED_SPQ_MODE_EBLOCK, NULL);
4556 }
4557}
4558
Baoyou Xieba569472016-09-09 09:21:15 +08004559static void qed_iov_pf_task(struct work_struct *work)
Yuval Mintz37bff2b2016-05-11 16:36:13 +03004561{
4562 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
4563 iov_task.work);
Yuval Mintz0b55e272016-05-11 16:36:15 +03004564 int rc;
Yuval Mintz37bff2b2016-05-11 16:36:13 +03004565
4566 if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
4567 return;
4568
Yuval Mintz0b55e272016-05-11 16:36:15 +03004569 if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
4570 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
4571
4572 if (!ptt) {
4573 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
4574 return;
4575 }
4576
4577 rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
4578 if (rc)
4579 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
4580
4581 qed_ptt_release(hwfn, ptt);
4582 }
4583
Yuval Mintz37bff2b2016-05-11 16:36:13 +03004584 if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
4585 qed_handle_vf_msg(hwfn);
Yuval Mintz08feecd2016-05-11 16:36:20 +03004586
4587 if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
4588 &hwfn->iov_task_flags))
4589 qed_handle_pf_set_vf_unicast(hwfn);
4590
Yuval Mintz36558c32016-05-11 16:36:17 +03004591 if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
4592 &hwfn->iov_task_flags))
4593 qed_handle_bulletin_post(hwfn);
Mintz, Yuvalf990c822017-01-01 13:57:08 +02004594
4595 if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
4596 qed_iov_handle_trust_change(hwfn);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03004597}
4598
4599void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
4600{
4601 int i;
4602
4603 for_each_hwfn(cdev, i) {
4604 if (!cdev->hwfns[i].iov_wq)
4605 continue;
4606
4607 if (schedule_first) {
4608 qed_schedule_iov(&cdev->hwfns[i],
4609 QED_IOV_WQ_STOP_WQ_FLAG);
4610 cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
4611 }
4612
4613 flush_workqueue(cdev->hwfns[i].iov_wq);
4614 destroy_workqueue(cdev->hwfns[i].iov_wq);
4615 }
4616}
4617
4618int qed_iov_wq_start(struct qed_dev *cdev)
4619{
4620 char name[NAME_SIZE];
4621 int i;
4622
4623 for_each_hwfn(cdev, i) {
4624 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4625
Yuval Mintz36558c32016-05-11 16:36:17 +03004626		/* PFs need a dedicated workqueue only if they support IOV.
4627 * VFs always require one.
4628 */
4629 if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
Yuval Mintz37bff2b2016-05-11 16:36:13 +03004630 continue;
4631
4632 snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
4633 cdev->pdev->bus->number,
4634 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
4635
4636 p_hwfn->iov_wq = create_singlethread_workqueue(name);
4637 if (!p_hwfn->iov_wq) {
4638 DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
4639 return -ENOMEM;
4640 }
4641
Yuval Mintz36558c32016-05-11 16:36:17 +03004642 if (IS_PF(cdev))
4643 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
4644 else
4645 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
Yuval Mintz37bff2b2016-05-11 16:36:13 +03004646 }
4647
4648 return 0;
4649}
Yuval Mintz0b55e272016-05-11 16:36:15 +03004650
4651const struct qed_iov_hv_ops qed_iov_ops_pass = {
4652 .configure = &qed_sriov_configure,
Yuval Mintzeff16962016-05-11 16:36:21 +03004653 .set_mac = &qed_sriov_pf_set_mac,
Yuval Mintz08feecd2016-05-11 16:36:20 +03004654 .set_vlan = &qed_sriov_pf_set_vlan,
Yuval Mintz73390ac2016-05-11 16:36:24 +03004655 .get_config = &qed_get_vf_config,
Yuval Mintz733def62016-05-11 16:36:22 +03004656 .set_link_state = &qed_set_vf_link_state,
Yuval Mintz6ddc7602016-05-11 16:36:23 +03004657 .set_spoof = &qed_spoof_configure,
Yuval Mintz733def62016-05-11 16:36:22 +03004658 .set_rate = &qed_set_vf_rate,
Mintz, Yuvalf990c822017-01-01 13:57:08 +02004659 .set_trust = &qed_set_vf_trust,
Yuval Mintz0b55e272016-05-11 16:36:15 +03004660};