/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"
#define QED_VF_ARRAY_LENGTH (3)

#define IS_VF(cdev)		((cdev)->b_is_vf)
#define IS_PF(cdev)		(!((cdev)->b_is_vf))
#ifdef CONFIG_QED_SRIOV
#define IS_PF_SRIOV(p_hwfn)	(!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_PF_SRIOV(p_hwfn)	(0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn)	(!!((p_hwfn)->pf_iov_info))

#define QED_MAX_VF_CHAINS_PER_PF 16
#define QED_ETH_VF_NUM_VLAN_FILTERS 2
/* This struct is part of qed_dev and contains data relevant to all hwfns;
 * Initialized only if the SR-IOV capability is exposed in PCIe config space.
 */
struct qed_hw_sriov_info {
	int pos;		/* capability position */
	int nres;		/* number of resources */
	u32 cap;		/* SR-IOV Capabilities */
	u16 ctrl;		/* SR-IOV Control */
	u16 total_vfs;		/* total VFs associated with the PF */
	u16 num_vfs;		/* number of VFs that have been started */
	u16 initial_vfs;	/* initial VFs associated with the PF */
	u16 nr_virtfn;		/* number of VFs available */
	u16 offset;		/* first VF Routing ID offset */
	u16 stride;		/* following VF stride */
	u16 vf_device_id;	/* VF device id */
	u32 pgsz;		/* page size for BAR alignment */
	u8 link;		/* Function Dependency Link */

	u32 first_vf_in_pf;
};
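
/* Illustrative sketch (not part of the driver): the PCIe SR-IOV capability
 * mirrored above derives each VF's routing ID from the PF's routing ID via
 * the offset and stride fields. Here iov, pf_rid and vf_idx are hypothetical
 * names for a pointer to this struct, the PF routing ID and a zero-based VF
 * index:
 *
 *	u16 vf_rid = pf_rid + iov->offset + vf_idx * iov->stride;
 */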

/* This mailbox is maintained per VF in its PF and contains all the
 * information required for sending / receiving a message.
 */
struct qed_iov_vf_mbx {
	union vfpf_tlvs *req_virt;
	dma_addr_t req_phys;
	union pfvf_tlvs *reply_virt;
	dma_addr_t reply_phys;

	/* Address in VF where a pending message is located */
	dma_addr_t pending_req;

	u8 *offset;

	/* saved VF request header */
	struct vfpf_first_tlv first_tlv;
};
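
/* Illustrative sketch (hypothetical PF-side handler, not part of this
 * header), assuming struct vfpf_first_tlv from qed_vf.h begins with a
 * channel TLV header carrying the request type. Because first_tlv is a
 * saved copy, the request type can still be inspected even after req_virt
 * has been overwritten by a newer message:
 *
 *	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
 *	u16 req_type = mbx->first_tlv.tl.type;
 */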

struct qed_vf_q_info {
	u16 fw_rx_qid;
	u16 fw_tx_qid;
	u8 fw_cid;
	u8 rxq_active;
	u8 txq_active;
};

enum vf_state {
	VF_FREE = 0,		/* VF is ready to be acquired; holds no resources */
	VF_ACQUIRED,		/* VF acquired, but not initialized */
	VF_STOPPED		/* VF stopped */
};

/* PFs maintain an array of this structure, per VF */
struct qed_vf_info {
	struct qed_iov_vf_mbx vf_mbx;
	enum vf_state state;
	bool b_init;

	struct qed_bulletin bulletin;
	dma_addr_t vf_bulletin;

	u32 concrete_fid;
	u16 opaque_fid;

	u8 vport_id;
	u8 relative_vf_id;
	u8 abs_vf_id;
#define QED_VF_ABS_ID(p_hwfn, p_vf)	(QED_PATH_ID(p_hwfn) ?		      \
					 (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
					 (p_vf)->abs_vf_id)

	u8 num_rxqs;
	u8 num_txqs;

	u8 num_sbs;

	u8 num_mac_filters;
	u8 num_vlan_filters;
	struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
	u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];

};

/* This structure is part of qed_hwfn and used only for PFs that have sriov
 * capability enabled.
 */
struct qed_pf_iov {
	struct qed_vf_info vfs_array[MAX_NUM_VFS];
	u64 pending_events[QED_VF_ARRAY_LENGTH];
	u64 pending_flr[QED_VF_ARRAY_LENGTH];

	/* Message buffers are allocated contiguously and split among the VFs */
	void *mbx_msg_virt_addr;
	dma_addr_t mbx_msg_phys_addr;
	u32 mbx_msg_size;
	void *mbx_reply_virt_addr;
	dma_addr_t mbx_reply_phys_addr;
	u32 mbx_reply_size;
	void *p_bulletins;
	dma_addr_t bulletins_phys;
	u32 bulletins_size;
};
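
/* Minimal sketch (not the driver's actual setup code) of how the contiguous
 * mailbox buffers above could be carved into per-VF regions, one
 * request/reply pair per relative VF id idx:
 *
 *	union vfpf_tlvs *req = p_iov->mbx_msg_virt_addr;
 *	union pfvf_tlvs *rep = p_iov->mbx_reply_virt_addr;
 *	struct qed_iov_vf_mbx *mbx = &p_iov->vfs_array[idx].vf_mbx;
 *
 *	mbx->req_virt = req + idx;
 *	mbx->req_phys = p_iov->mbx_msg_phys_addr + idx * sizeof(*req);
 *	mbx->reply_virt = rep + idx;
 *	mbx->reply_phys = p_iov->mbx_reply_phys_addr + idx * sizeof(*rep);
 */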

enum qed_iov_wq_flag {
	QED_IOV_WQ_MSG_FLAG,
	QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
	QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
	QED_IOV_WQ_STOP_WQ_FLAG,
	QED_IOV_WQ_FLR_FLAG,
};

#ifdef CONFIG_QED_SRIOV
/**
 * @brief - Given a VF index, return the index of the next [including that]
 *          active VF.
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return MAX_NUM_VFS in case there is no further active VF, otherwise index.
 */
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);

/**
 * @brief Read SR-IOV related information and allocate resources;
 *        reads from configuration space, shmem, etc.
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
 *
 * @param p_hwfn
 * @param offset
 * @param type
 * @param length
 *
 * @return pointer to the newly placed tlv
 */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
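
/* Illustrative usage sketch (not lifted from the driver; TLV type and struct
 * names are assumptions based on qed_vf.h): TLVs are appended back to back
 * at a running offset, and the chain is closed with a list-end TLV, e.g.
 * when a PF builds a reply into a VF's mailbox:
 *
 *	mbx->offset = (u8 *)mbx->reply_virt;
 *	resp = qed_add_tlv(p_hwfn, &mbx->offset, type, length);
 *	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 */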

/**
 * @brief list the types and lengths of the tlvs on the buffer
 *
 * @param p_hwfn
 * @param tlvs_list
 */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);

/**
 * @brief qed_iov_alloc - allocate sriov related resources
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_iov_alloc(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_iov_setup - setup sriov related resources
 *
 * @param p_hwfn
 * @param p_ptt
 */
void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief qed_iov_free - free sriov related resources
 *
 * @param p_hwfn
 */
void qed_iov_free(struct qed_hwfn *p_hwfn);

/**
 * @brief free sriov related memory that was allocated during hw_prepare
 *
 * @param cdev
 */
void qed_iov_free_hw_info(struct qed_dev *cdev);

/**
 * @brief qed_sriov_eqe_event - handle an async sriov event that arrived
 *        on the eqe.
 *
 * @param p_hwfn
 * @param opcode
 * @param echo
 * @param data
 */
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			u8 opcode, __le16 echo, union event_ring_data *data);

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
int qed_iov_wq_start(struct qed_dev *cdev);

void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
void qed_vf_start_iov_wq(struct qed_dev *cdev);
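
/* Illustrative usage sketch (not taken from the driver): PF-side paths defer
 * IOV housekeeping to the IOV workqueue by raising one of the qed_iov_wq_flag
 * bits, e.g. to push a refreshed bulletin to the VFs:
 *
 *	qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
 */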
#else
static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
					     u16 rel_vf_id)
{
	return MAX_NUM_VFS;
}

static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
}

static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}

static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
				      u8 opcode,
				      __le16 echo, union event_ring_data *data)
{
	return -EINVAL;
}

static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
}

static inline int qed_iov_wq_start(struct qed_dev *cdev)
{
	return 0;
}

static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
				    enum qed_iov_wq_flag flag)
{
}

static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
}
#endif

#define qed_for_each_vf(_p_hwfn, _i)			  \
	for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
	     _i < MAX_NUM_VFS;				  \
	     _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
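
/* Usage sketch (illustrative): iterate over the relative ids of all active
 * VFs on this hwfn; the loop body is skipped entirely when none are active:
 *
 *	u16 i;
 *
 *	qed_for_each_vf(p_hwfn, i) {
 *		struct qed_vf_info *vf = &p_hwfn->pf_iov_info->vfs_array[i];
 *		...
 *	}
 */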

#endif