/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_VF_H
#define _QED_VF_H

#include "qed_l2.h"

#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY_SIZE 10

struct vf_pf_resc_request {
	u8 num_rxqs;
	u8 num_txqs;
	u8 num_sbs;
	u8 num_mac_filters;
	u8 num_vlan_filters;
	u8 num_mc_filters;
	u16 padding;
};

struct hw_sb_info {
	u16 hw_sb_id;
	u8 sb_qid;
	u8 padding[5];
};

#define TLV_BUFFER_SIZE 1024

enum {
	PFVF_STATUS_WAITING,
	PFVF_STATUS_SUCCESS,
	PFVF_STATUS_FAILURE,
	PFVF_STATUS_NOT_SUPPORTED,
	PFVF_STATUS_NO_RESOURCE,
	PFVF_STATUS_FORCED,
};

/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
	u16 type;
	u16 length;
};

/* header of first vf->pf tlv carries the offset used to calculate response
 * buffer address
 */
struct vfpf_first_tlv {
	struct channel_tlv tl;
	u32 padding;
	u64 reply_address;
};

/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
	struct channel_tlv tl;
	u8 status;
	u8 padding[3];
};

/* response tlv used for most tlvs */
struct pfvf_def_resp_tlv {
	struct pfvf_tlv hdr;
};

/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
	struct channel_tlv tl;
	u8 padding[4];
};

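/* Illustrative sketch (a hypothetical helper, not driver code) of how the
 * headers above are meant to frame a vf->pf exchange: the request starts with
 * a command TLV whose embedded vfpf_first_tlv names the type, the command
 * length and the DMA address for the reply, and the chain is closed with a
 * channel_list_end_tlv; the PF answers with a pfvf_tlv whose status is one of
 * the PFVF_STATUS_* values. The CHANNEL_TLV_* constants referenced below are
 * defined further down in this header.
 *
 *	static void example_frame_request(void *req_buf, u16 type, u16 len,
 *					  dma_addr_t reply_phys)
 *	{
 *		struct vfpf_first_tlv *first = req_buf;
 *		struct channel_list_end_tlv *end;
 *
 *		first->tl.type = type;		// e.g. CHANNEL_TLV_ACQUIRE
 *		first->tl.length = len;		// size of the command TLV
 *		first->reply_address = (u64)reply_phys;
 *
 *		// Terminate the list so the PF knows where the chain ends.
 *		end = (struct channel_list_end_tlv *)((u8 *)req_buf + len);
 *		end->tl.type = CHANNEL_TLV_LIST_END;
 *		end->tl.length = sizeof(*end);
 *	}
 */
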
#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)

struct vfpf_acquire_tlv {
	struct vfpf_first_tlv first_tlv;

	struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_OBSOLETE (1 << 0)
#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
		u64 capabilities;
		u8 fw_major;
		u8 fw_minor;
		u8 fw_revision;
		u8 fw_engineering;
		u32 driver_version;
		u16 opaque_fid; /* ME register value */
		u8 os_type; /* VFPF_ACQUIRE_OS_* value */
		u8 padding[5];
	} vfdev_info;

	struct vf_pf_resc_request resc_request;

	u64 bulletin_addr;
	u32 bulletin_size;
	u32 padding;
};

/* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
	struct channel_tlv tl;

	u8 update_rss_flags;
#define VFPF_UPDATE_RSS_CONFIG_FLAG BIT(0)
#define VFPF_UPDATE_RSS_CAPS_FLAG BIT(1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG BIT(2)
#define VFPF_UPDATE_RSS_KEY_FLAG BIT(3)

	u8 rss_enable;
	u8 rss_caps;
	u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
	u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
	u32 rss_key[T_ETH_RSS_KEY_SIZE];
};

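/* A minimal sketch of the arithmetic implied by rss_table_size_log, assuming
 * the full indirection table is used and that its entries hold per-VF rx
 * queue indices (that interpretation is an assumption here, and the helper
 * name is hypothetical). With T_ETH_INDIRECTION_TABLE_SIZE == 128 the log
 * value is 7, since the table holds 2 ^ rss_table_size_log entries.
 *
 *	static void example_fill_rss(struct vfpf_vport_update_rss_tlv *p_rss,
 *				     u8 num_rxqs)
 *	{
 *		int i;
 *
 *		p_rss->rss_table_size_log = 7;	// 1 << 7 == 128 entries
 *		for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
 *			p_rss->rss_ind_table[i] = i % num_rxqs;
 *		p_rss->update_rss_flags = VFPF_UPDATE_RSS_IND_TABLE_FLAG;
 *	}
 */
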
struct pfvf_storm_stats {
	u32 address;
	u32 len;
};

struct pfvf_stats_info {
	struct pfvf_storm_stats mstats;
	struct pfvf_storm_stats pstats;
	struct pfvf_storm_stats tstats;
	struct pfvf_storm_stats ustats;
};

struct pfvf_acquire_resp_tlv {
	struct pfvf_tlv hdr;

	struct pf_vf_pfdev_info {
		u32 chip_num;
		u32 mfw_ver;

		u16 fw_major;
		u16 fw_minor;
		u16 fw_rev;
		u16 fw_eng;

		u64 capabilities;
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED BIT(0)
#define PFVF_ACQUIRE_CAP_100G BIT(1) /* If set, 100g PF */
/* There are old PF versions where the PF might mistakenly override the sanity
 * mechanism [version-based] and allow a VF that can't be supported to pass
 * the acquisition phase.
 * To overcome this, PFs now indicate that they're past that point and the new
 * VFs would fail probe on the older PFs that fail to do so.
 */
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2)

		u16 db_size;
		u8 indices_per_sb;
		u8 os_type;

		/* These should match the PF's qed_dev values */
		u16 chip_rev;
		u8 dev_type;

		u8 padding;

		struct pfvf_stats_info stats_info;

		u8 port_mac[ETH_ALEN];
		u8 padding2[2];
	} pfdev_info;

	struct pf_vf_resc {
#define PFVF_MAX_QUEUES_PER_VF 16
#define PFVF_MAX_SBS_PER_VF 16
		struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
		u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
		u8 cid[PFVF_MAX_QUEUES_PER_VF];

		u8 num_rxqs;
		u8 num_txqs;
		u8 num_sbs;
		u8 num_mac_filters;
		u8 num_vlan_filters;
		u8 num_mc_filters;
		u8 padding[2];
	} resc;

	u32 bulletin_size;
	u32 padding;
};

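/* A hedged sketch of how a VF might inspect the acquire response above; the
 * function name is hypothetical and the policy (rejecting PFs that never set
 * PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE) only illustrates the intent described in
 * the comment on that capability bit.
 *
 *	static int example_check_acquire(const struct pfvf_acquire_resp_tlv *resp)
 *	{
 *		if (resp->hdr.status != PFVF_STATUS_SUCCESS)
 *			return -EINVAL;
 *
 *		// New VFs fail probe on older PFs that never set this bit.
 *		if (!(resp->pfdev_info.capabilities &
 *		      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE))
 *			return -EOPNOTSUPP;
 *
 *		return 0;
 *	}
 */
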
struct pfvf_start_queue_resp_tlv {
	struct pfvf_tlv hdr;
	u32 offset; /* offset to consumer/producer of queue */
	u8 padding[4];
};

/* Setup Queue */
struct vfpf_start_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 rxq_addr;
	u64 deprecated_sge_addr;
	u64 cqe_pbl_addr;

	u16 cqe_pbl_size;
	u16 hw_sb;
	u16 rx_qid;
	u16 hc_rate; /* desired interrupts per sec. */

	u16 bd_max_bytes;
	u16 stat_id;
	u8 sb_index;
	u8 padding[3];
};

struct vfpf_start_txq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 pbl_addr;
	u16 pbl_size;
	u16 stat_id;
	u16 tx_qid;
	u16 hw_sb;

	u32 flags; /* VFPF_QUEUE_FLG_X flags */
	u16 hc_rate; /* desired interrupts per sec. */
	u8 sb_index;
	u8 padding[3];
};

/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 rx_qid;
	u8 num_rxqs;
	u8 cqe_completion;
	u8 padding[4];
};

/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 tx_qid;
	u8 num_txqs;
	u8 padding[5];
};

struct vfpf_update_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];

	u16 rx_qid;
	u8 num_rxqs;
	u8 flags;
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG BIT(0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG BIT(1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG BIT(2)

	u8 padding[4];
};

/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
	u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */

	u8 mac[ETH_ALEN];
	u16 vlan_tag;

	u8 padding[4];
};

/* Start a vport */
struct vfpf_vport_start_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 sb_addr[PFVF_MAX_SBS_PER_VF];

	u32 tpa_mode;
	u16 dep1;
	u16 mtu;

	u8 vport_id;
	u8 inner_vlan_removal;

	u8 only_untagged;
	u8 max_buffers_per_cqe;

	u8 padding[4];
};

/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
struct vfpf_vport_update_activate_tlv {
	struct channel_tlv tl;
	u8 update_rx;
	u8 update_tx;
	u8 active_rx;
	u8 active_tx;
};

struct vfpf_vport_update_mcast_bin_tlv {
	struct channel_tlv tl;
	u8 padding[4];

	u64 bins[8];
};

struct vfpf_vport_update_accept_param_tlv {
	struct channel_tlv tl;
	u8 update_rx_mode;
	u8 update_tx_mode;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
};

/* Primary tlv as a header for various extended tlvs for
 * various functionalities in vport update ramrod.
 */
struct vfpf_vport_update_tlv {
	struct vfpf_first_tlv first_tlv;
};

struct vfpf_ucast_filter_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 opcode;
	u8 type;

	u8 mac[ETH_ALEN];

	u16 vlan;
	u16 padding[3];
};

struct tlv_buffer_size {
	u8 tlv_buffer[TLV_BUFFER_SIZE];
};

union vfpf_tlvs {
	struct vfpf_first_tlv first_tlv;
	struct vfpf_acquire_tlv acquire;
	struct vfpf_start_rxq_tlv start_rxq;
	struct vfpf_start_txq_tlv start_txq;
	struct vfpf_stop_rxqs_tlv stop_rxqs;
	struct vfpf_stop_txqs_tlv stop_txqs;
	struct vfpf_vport_start_tlv start_vport;
	struct vfpf_vport_update_tlv vport_update;
	struct vfpf_ucast_filter_tlv ucast_filter;
	struct channel_list_end_tlv list_end;
	struct tlv_buffer_size tlv_buf_size;
};

union pfvf_tlvs {
	struct pfvf_def_resp_tlv default_resp;
	struct pfvf_acquire_resp_tlv acquire_resp;
	struct tlv_buffer_size tlv_buf_size;
	struct pfvf_start_queue_resp_tlv queue_start;
};

struct qed_bulletin_content {
	/* crc of the structure, used to ensure it is not read mid-update */
	u32 crc;

	u32 version;

	/* bitmap indicating which fields hold valid values */
	u64 valid_bitmap;
};

struct qed_bulletin {
	dma_addr_t phys;
	struct qed_bulletin_content *p_virt;
	u32 size;
};

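/* A minimal sketch of consuming the bulletin board, assuming the PF posts
 * updates asynchronously, that the CRC covers the content following the crc
 * field with a zero crc32() seed (linux/crc32.h), and that the posted
 * bulletin fits the shadow copy; those details and the helper name are
 * assumptions, not the driver's implementation. The idea is to snapshot the
 * PF-written buffer and accept it only if the CRC matches, i.e. the copy was
 * not torn mid-update.
 *
 *	static bool example_sample_bulletin(struct qed_bulletin *p_bulletin,
 *					    struct qed_bulletin_content *shadow)
 *	{
 *		u32 crc;
 *
 *		// Snapshot the PF-written buffer into the local shadow.
 *		memcpy(shadow, p_bulletin->p_virt, p_bulletin->size);
 *
 *		// Recompute the CRC over everything past the crc field.
 *		crc = crc32(0, (u8 *)shadow + sizeof(shadow->crc),
 *			    p_bulletin->size - sizeof(shadow->crc));
 *		return crc == shadow->crc;
 *	}
 */
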
enum {
	CHANNEL_TLV_NONE, /* ends tlv sequence */
	CHANNEL_TLV_ACQUIRE,
	CHANNEL_TLV_VPORT_START,
	CHANNEL_TLV_VPORT_UPDATE,
	CHANNEL_TLV_VPORT_TEARDOWN,
	CHANNEL_TLV_START_RXQ,
	CHANNEL_TLV_START_TXQ,
	CHANNEL_TLV_STOP_RXQS,
	CHANNEL_TLV_STOP_TXQS,
	CHANNEL_TLV_INT_CLEANUP,
	CHANNEL_TLV_CLOSE,
	CHANNEL_TLV_RELEASE,
	CHANNEL_TLV_LIST_END,
	CHANNEL_TLV_UCAST_FILTER,
	CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
	CHANNEL_TLV_VPORT_UPDATE_MCAST,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
	CHANNEL_TLV_VPORT_UPDATE_RSS,
	CHANNEL_TLV_MAX,

	/* Required for iterating over vport-update tlvs.
	 * Will break if the vport-update tlvs are not kept sequential.
	 */
	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_RSS + 1,
};

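/* A hedged sketch of what the sequential layout above enables: walking every
 * extended vport-update TLV type in one loop. The helper is hypothetical;
 * the point is only that the loop bounds rely on
 * CHANNEL_TLV_VPORT_UPDATE_ACTIVATE..CHANNEL_TLV_VPORT_UPDATE_RSS being
 * consecutive enum values, which is why the comment above warns against
 * making them non-sequential.
 *
 *	static int example_num_vport_update_ext_tlvs(void)
 *	{
 *		int tlv, count = 0;
 *
 *		for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
 *		     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++)
 *			count++;	// one handler/response slot per type
 *
 *		return count;
 *	}
 */
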
/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
	union vfpf_tlvs *vf2pf_request;
	dma_addr_t vf2pf_request_phys;
	union pfvf_tlvs *pf2vf_reply;
	dma_addr_t pf2vf_reply_phys;

	/* Should be taken whenever the mailbox buffers are accessed */
	struct mutex mutex;
	u8 *offset;

	/* Bulletin Board */
	struct qed_bulletin bulletin;
	struct qed_bulletin_content bulletin_shadow;

	/* we set aside a copy of the acquire response */
	struct pfvf_acquire_resp_tlv acquire_resp;
};

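/* A minimal sketch of the mailbox convention implied above, assuming a
 * hypothetical example_send() that hands vf2pf_request_phys to the PF and
 * waits for the reply DMA to land; everything except the structure fields is
 * illustrative, not the driver's implementation. The mutex serializes access
 * to the two shared buffers, the request carries pf2vf_reply_phys so the PF
 * knows where to write, and the outcome is read from the PFVF_STATUS_* value
 * in the reply header.
 *
 *	static int example_vf2pf_exchange(struct qed_vf_iov *iov)
 *	{
 *		int rc;
 *
 *		mutex_lock(&iov->mutex);
 *
 *		memset(iov->vf2pf_request, 0, sizeof(*iov->vf2pf_request));
 *		iov->vf2pf_request->first_tlv.reply_address =
 *			(u64)iov->pf2vf_reply_phys;
 *		// ... fill the command-specific TLV and the list-end TLV ...
 *
 *		rc = example_send(iov);	// hypothetical doorbell/wait step
 *		if (!rc &&
 *		    iov->pf2vf_reply->default_resp.hdr.status !=
 *		    PFVF_STATUS_SUCCESS)
 *			rc = -EINVAL;
 *
 *		mutex_unlock(&iov->mutex);
 *		return rc;
 *	}
 */
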
#ifdef CONFIG_QED_SRIOV
/**
 * @brief Get number of Rx queues allocated for VF by qed
 *
 * @param p_hwfn
 * @param num_rxqs - allocated RX queues
 */
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);

/**
 * @brief Get port mac address for VF
 *
 * @param p_hwfn
 * @param port_mac - destination location for port mac
 */
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);

/**
 * @brief Get number of VLAN filters allocated for VF by qed
 *
 * @param p_hwfn
 * @param num_vlan_filters - allocated VLAN filters
 */
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
				 u8 *num_vlan_filters);

/**
 * @brief Set firmware version information in dev_info from the VF's acquire
 *        response tlv
 *
 * @param p_hwfn
 * @param fw_major
 * @param fw_minor
 * @param fw_rev
 * @param fw_eng
 */
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng);

/**
 * @brief hw preparation for VF - sends ACQUIRE message
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - start the RX Queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param cid - zero based within the VF
 * @param rx_queue_id - zero based within the VF
 * @param sb - VF status block for this queue
 * @param sb_index - Index within the status block
 * @param bd_max_bytes - maximum number of bytes per bd
 * @param bd_chain_phys_addr - physical address of bd chain
 * @param cqe_pbl_addr - physical address of pbl
 * @param cqe_pbl_size - pbl size
 * @param pp_prod - pointer to the producer to be used in fastpath
 *
 * @return int
 */
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
			u8 rx_queue_id,
			u16 sb,
			u8 sb_index,
			u16 bd_max_bytes,
			dma_addr_t bd_chain_phys_addr,
			dma_addr_t cqe_pbl_addr,
			u16 cqe_pbl_size, void __iomem **pp_prod);

/**
 * @brief VF - start the TX queue by sending a message to the PF.
 *
 * @param p_hwfn
 * @param tx_queue_id - zero based within the VF
 * @param sb - status block for this queue
 * @param sb_index - index within the status block
 * @param pbl_addr - physical address of tx chain
 * @param pp_doorbell - pointer to address to which to write the doorbell
 *
 * @return int
 */
int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
			u16 tx_queue_id,
			u16 sb,
			u8 sb_index,
			dma_addr_t pbl_addr,
			u16 pbl_size, void __iomem **pp_doorbell);

/**
 * @brief VF - stop the RX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param rx_qid
 * @param cqe_completion
 *
 * @return int
 */
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       u16 rx_qid, bool cqe_completion);

/**
 * @brief VF - stop the TX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param tx_qid
 *
 * @return int
 */
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid);

/**
 * @brief VF - send a vport update command
 *
 * @param p_hwfn
 * @param p_params
 *
 * @return int
 */
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params);

/**
 * @brief VF - send a close message to PF
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - free the VF's memories
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_release(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given sb_id.
 *        For VFs, IGU SBs don't have to be contiguous.
 *
 * @param p_hwfn
 * @param sb_id
 *
 * @return INLINE u16
 */
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);

/**
 * @brief qed_vf_pf_vport_start - perform vport start for VF.
 *
 * @param p_hwfn
 * @param vport_id
 * @param mtu
 * @param inner_vlan_removal
 * @param tpa_mode
 * @param max_buffers_per_cqe
 * @param only_untagged - default behavior regarding vlan acceptance
 *
 * @return enum _qed_status
 */
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe);

/**
 * @brief qed_vf_pf_vport_stop - stop the VF's vport
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_param);

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd);

/**
 * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);
#else
static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
}

static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}

static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
					       u8 *num_vlan_filters)
{
}

static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
					 u16 *fw_major, u16 *fw_minor,
					 u16 *fw_rev, u16 *fw_eng)
{
}

static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
				      u8 rx_queue_id,
				      u16 sb,
				      u8 sb_index,
				      u16 bd_max_bytes,
				      dma_addr_t bd_chain_phys_addr,
				      dma_addr_t cqe_pbl_addr,
				      u16 cqe_pbl_size, void __iomem **pp_prod)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
				      u16 tx_queue_id,
				      u16 sb,
				      u8 sb_index,
				      dma_addr_t pbl_addr,
				      u16 pbl_size, void __iomem **pp_doorbell)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
				     u16 rx_qid, bool cqe_completion)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
{
	return -EINVAL;
}

static inline int
qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
		       struct qed_sp_vport_update_params *p_params)
{
	return -EINVAL;
}

static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	return 0;
}

static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
					u8 vport_id,
					u16 mtu,
					u8 inner_vlan_removal,
					enum qed_tpa_mode tpa_mode,
					u8 max_buffers_per_cqe)
{
	return -EINVAL;
}

static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
					 struct qed_filter_ucast *p_param)
{
	return -EINVAL;
}

static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
					  struct qed_filter_mcast *p_filter_cmd)
{
}

static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}
#endif

#endif