blob: 10794b08fd214a5fd2ebc49b2c2b95d3ece77b77 [file] [log] [blame]
/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
8
9#ifndef _QED_SRIOV_H
10#define _QED_SRIOV_H
11#include <linux/types.h>
12#include "qed_vf.h"
13#define QED_VF_ARRAY_LENGTH (3)
14
15#define IS_VF(cdev) ((cdev)->b_is_vf)
16#define IS_PF(cdev) (!((cdev)->b_is_vf))
17#ifdef CONFIG_QED_SRIOV
18#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
19#else
20#define IS_PF_SRIOV(p_hwfn) (0)
21#endif
22#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
23
Yuval Mintz1408cc1f2016-05-11 16:36:14 +030024#define QED_MAX_VF_CHAINS_PER_PF 16
25#define QED_ETH_VF_NUM_VLAN_FILTERS 2
26
Yuval Mintz0b55e272016-05-11 16:36:15 +030027struct qed_public_vf_info {
28 /* These copies will later be reflected in the bulletin board,
29 * but this copy should be newer.
30 */
31 u8 mac[ETH_ALEN];
32};
33
Yuval Mintz32a47e72016-05-11 16:36:12 +030034/* This struct is part of qed_dev and contains data relevant to all hwfns;
35 * Initialized only if SR-IOV cpabability is exposed in PCIe config space.
36 */
37struct qed_hw_sriov_info {
38 int pos; /* capability position */
39 int nres; /* number of resources */
40 u32 cap; /* SR-IOV Capabilities */
41 u16 ctrl; /* SR-IOV Control */
42 u16 total_vfs; /* total VFs associated with the PF */
43 u16 num_vfs; /* number of vfs that have been started */
44 u16 initial_vfs; /* initial VFs associated with the PF */
45 u16 nr_virtfn; /* number of VFs available */
46 u16 offset; /* first VF Routing ID offset */
47 u16 stride; /* following VF stride */
48 u16 vf_device_id; /* VF device id */
49 u32 pgsz; /* page size for BAR alignment */
50 u8 link; /* Function Dependency Link */
51
52 u32 first_vf_in_pf;
53};
54
55/* This mailbox is maintained per VF in its PF contains all information
56 * required for sending / receiving a message.
57 */
58struct qed_iov_vf_mbx {
59 union vfpf_tlvs *req_virt;
60 dma_addr_t req_phys;
61 union pfvf_tlvs *reply_virt;
62 dma_addr_t reply_phys;
Yuval Mintz37bff2b2016-05-11 16:36:13 +030063
64 /* Address in VF where a pending message is located */
65 dma_addr_t pending_req;
66
67 u8 *offset;
68
69 /* saved VF request header */
70 struct vfpf_first_tlv first_tlv;
Yuval Mintz32a47e72016-05-11 16:36:12 +030071};
72
Yuval Mintz1408cc1f2016-05-11 16:36:14 +030073struct qed_vf_q_info {
74 u16 fw_rx_qid;
75 u16 fw_tx_qid;
76 u8 fw_cid;
77 u8 rxq_active;
78 u8 txq_active;
79};
80
/* Lifecycle states of a VF, as tracked by its PF */
enum vf_state {
	VF_FREE = 0,		/* VF ready to be acquired holds no resc */
	VF_ACQUIRED,		/* VF, acquired, but not initialized */
	VF_RESET,		/* VF, FLR'd, pending cleanup */
	VF_STOPPED		/* VF, Stopped */
};
87
88/* PFs maintain an array of this structure, per VF */
89struct qed_vf_info {
90 struct qed_iov_vf_mbx vf_mbx;
91 enum vf_state state;
92 bool b_init;
Yuval Mintz0b55e272016-05-11 16:36:15 +030093 u8 to_disable;
Yuval Mintz32a47e72016-05-11 16:36:12 +030094
95 struct qed_bulletin bulletin;
96 dma_addr_t vf_bulletin;
97
98 u32 concrete_fid;
99 u16 opaque_fid;
100
101 u8 vport_id;
102 u8 relative_vf_id;
103 u8 abs_vf_id;
104#define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? \
105 (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
106 (p_vf)->abs_vf_id)
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300107
108 u8 num_rxqs;
109 u8 num_txqs;
110
111 u8 num_sbs;
112
113 u8 num_mac_filters;
114 u8 num_vlan_filters;
115 struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
116 u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
Yuval Mintz0b55e272016-05-11 16:36:15 +0300117 struct qed_public_vf_info p_vf_info;
Yuval Mintz32a47e72016-05-11 16:36:12 +0300118};
119
120/* This structure is part of qed_hwfn and used only for PFs that have sriov
121 * capability enabled.
122 */
123struct qed_pf_iov {
124 struct qed_vf_info vfs_array[MAX_NUM_VFS];
125 u64 pending_events[QED_VF_ARRAY_LENGTH];
126 u64 pending_flr[QED_VF_ARRAY_LENGTH];
127
128 /* Allocate message address continuosuly and split to each VF */
129 void *mbx_msg_virt_addr;
130 dma_addr_t mbx_msg_phys_addr;
131 u32 mbx_msg_size;
132 void *mbx_reply_virt_addr;
133 dma_addr_t mbx_reply_phys_addr;
134 u32 mbx_reply_size;
135 void *p_bulletins;
136 dma_addr_t bulletins_phys;
137 u32 bulletins_size;
138};
139
/* Deferred-work flags handled by the IOV workqueue */
enum qed_iov_wq_flag {
	QED_IOV_WQ_MSG_FLAG,
	QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
	QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
	QED_IOV_WQ_STOP_WQ_FLAG,
	QED_IOV_WQ_FLR_FLAG,
};
147
Yuval Mintz32a47e72016-05-11 16:36:12 +0300148#ifdef CONFIG_QED_SRIOV
149/**
150 * @brief - Given a VF index, return index of next [including that] active VF.
151 *
152 * @param p_hwfn
153 * @param rel_vf_id
154 *
155 * @return MAX_NUM_VFS in case no further active VFs, otherwise index.
156 */
157u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
158
159/**
160 * @brief Read sriov related information and allocated resources
161 * reads from configuraiton space, shmem, etc.
162 *
163 * @param p_hwfn
164 *
165 * @return int
166 */
167int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
168
169/**
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300170 * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
171 *
172 * @param p_hwfn
173 * @param p_iov
174 * @param type
175 * @param length
176 *
177 * @return pointer to the newly placed tlv
178 */
179void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
180
181/**
182 * @brief list the types and lengths of the tlvs on the buffer
183 *
184 * @param p_hwfn
185 * @param tlvs_list
186 */
187void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
188
189/**
Yuval Mintz32a47e72016-05-11 16:36:12 +0300190 * @brief qed_iov_alloc - allocate sriov related resources
191 *
192 * @param p_hwfn
193 *
194 * @return int
195 */
196int qed_iov_alloc(struct qed_hwfn *p_hwfn);
197
198/**
199 * @brief qed_iov_setup - setup sriov related resources
200 *
201 * @param p_hwfn
202 * @param p_ptt
203 */
204void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
205
206/**
207 * @brief qed_iov_free - free sriov related resources
208 *
209 * @param p_hwfn
210 */
211void qed_iov_free(struct qed_hwfn *p_hwfn);
212
213/**
214 * @brief free sriov related memory that was allocated during hw_prepare
215 *
216 * @param cdev
217 */
218void qed_iov_free_hw_info(struct qed_dev *cdev);
Yuval Mintz37bff2b2016-05-11 16:36:13 +0300219
220/**
221 * @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe.
222 *
223 * @param p_hwfn
224 * @param opcode
225 * @param echo
226 * @param data
227 */
228int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
229 u8 opcode, __le16 echo, union event_ring_data *data);
230
Yuval Mintz0b55e272016-05-11 16:36:15 +0300231/**
232 * @brief Mark structs of vfs that have been FLR-ed.
233 *
234 * @param p_hwfn
235 * @param disabled_vfs - bitmask of all VFs on path that were FLRed
236 *
237 * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
238 */
239int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
240
Yuval Mintz37bff2b2016-05-11 16:36:13 +0300241void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
242int qed_iov_wq_start(struct qed_dev *cdev);
243
244void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300245void qed_vf_start_iov_wq(struct qed_dev *cdev);
Yuval Mintz0b55e272016-05-11 16:36:15 +0300246int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
Yuval Mintz32a47e72016-05-11 16:36:12 +0300247#else
248static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
249 u16 rel_vf_id)
250{
251 return MAX_NUM_VFS;
252}
253
254static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
255{
256 return 0;
257}
258
259static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
260{
261 return 0;
262}
263
264static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
265{
266}
267
268static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
269{
270}
271
272static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
273{
274}
Yuval Mintz37bff2b2016-05-11 16:36:13 +0300275
276static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
277 u8 opcode,
278 __le16 echo, union event_ring_data *data)
279{
280 return -EINVAL;
281}
282
Yuval Mintz0b55e272016-05-11 16:36:15 +0300283static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
284 u32 *disabled_vfs)
285{
286 return 0;
287}
288
Yuval Mintz37bff2b2016-05-11 16:36:13 +0300289static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
290{
291}
292
293static inline int qed_iov_wq_start(struct qed_dev *cdev)
294{
295 return 0;
296}
297
298static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
299 enum qed_iov_wq_flag flag)
300{
301}
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300302
303static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
304{
305}
Yuval Mintz0b55e272016-05-11 16:36:15 +0300306
307static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
308{
309 return 0;
310}
Yuval Mintz32a47e72016-05-11 16:36:12 +0300311#endif
312
313#define qed_for_each_vf(_p_hwfn, _i) \
314 for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
315 _i < MAX_NUM_VFS; \
316 _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
317
318#endif