blob: 2c94b445d07f0bd8dafdfd6f214837a36efbf715 [file] [log] [blame]
Yuval Mintz32a47e72016-05-11 16:36:12 +03001/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8
9#ifndef _QED_SRIOV_H
10#define _QED_SRIOV_H
11#include <linux/types.h>
12#include "qed_vf.h"
/* Number of u64 words in the per-VF bitmaps (pending_events/pending_flr);
 * NOTE(review): presumably sized to cover MAX_NUM_VFS bits - confirm.
 */
#define QED_VF_ARRAY_LENGTH (3)

/* Is this device instance operating as a Virtual / Physical Function? */
#define IS_VF(cdev) ((cdev)->b_is_vf)
#define IS_PF(cdev) (!((cdev)->b_is_vf))
/* Does the PF expose the SR-IOV capability in PCIe config space? */
#ifdef CONFIG_QED_SRIOV
#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_PF_SRIOV(p_hwfn) (0)
#endif
/* Were the PF's per-VF sriov structures (struct qed_pf_iov) allocated? */
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))

/* Bounds the vf_queues[] / igu_sbs[] arrays of struct qed_vf_info below */
#define QED_MAX_VF_CHAINS_PER_PF 16
#define QED_ETH_VF_NUM_VLAN_FILTERS 2
26
Yuval Mintzdacd88d2016-05-11 16:36:16 +030027enum qed_iov_vport_update_flag {
28 QED_IOV_VP_UPDATE_ACTIVATE,
Yuval Mintz17b235c2016-05-11 16:36:18 +030029 QED_IOV_VP_UPDATE_VLAN_STRIP,
30 QED_IOV_VP_UPDATE_TX_SWITCH,
Yuval Mintzdacd88d2016-05-11 16:36:16 +030031 QED_IOV_VP_UPDATE_MCAST,
32 QED_IOV_VP_UPDATE_ACCEPT_PARAM,
33 QED_IOV_VP_UPDATE_RSS,
Yuval Mintz17b235c2016-05-11 16:36:18 +030034 QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
35 QED_IOV_VP_UPDATE_SGE_TPA,
Yuval Mintzdacd88d2016-05-11 16:36:16 +030036 QED_IOV_VP_UPDATE_MAX,
37};
38
Yuval Mintz0b55e272016-05-11 16:36:15 +030039struct qed_public_vf_info {
40 /* These copies will later be reflected in the bulletin board,
41 * but this copy should be newer.
42 */
43 u8 mac[ETH_ALEN];
44};
45
Yuval Mintz32a47e72016-05-11 16:36:12 +030046/* This struct is part of qed_dev and contains data relevant to all hwfns;
47 * Initialized only if SR-IOV cpabability is exposed in PCIe config space.
48 */
49struct qed_hw_sriov_info {
50 int pos; /* capability position */
51 int nres; /* number of resources */
52 u32 cap; /* SR-IOV Capabilities */
53 u16 ctrl; /* SR-IOV Control */
54 u16 total_vfs; /* total VFs associated with the PF */
55 u16 num_vfs; /* number of vfs that have been started */
56 u16 initial_vfs; /* initial VFs associated with the PF */
57 u16 nr_virtfn; /* number of VFs available */
58 u16 offset; /* first VF Routing ID offset */
59 u16 stride; /* following VF stride */
60 u16 vf_device_id; /* VF device id */
61 u32 pgsz; /* page size for BAR alignment */
62 u8 link; /* Function Dependency Link */
63
64 u32 first_vf_in_pf;
65};
66
/* This mailbox is maintained per VF in its PF and contains all information
 * required for sending / receiving a message.
 */
struct qed_iov_vf_mbx {
	union vfpf_tlvs *req_virt;	/* virtual address of the VF request */
	dma_addr_t req_phys;		/* DMA address of req_virt */
	union pfvf_tlvs *reply_virt;	/* virtual address of the PF reply */
	dma_addr_t reply_phys;		/* DMA address of reply_virt */

	/* Address in VF where a pending message is located */
	dma_addr_t pending_req;

	/* Current write position when composing a TLV reply */
	u8 *offset;

	/* saved VF request header */
	struct vfpf_first_tlv first_tlv;
};
84
Yuval Mintz1408cc1f2016-05-11 16:36:14 +030085struct qed_vf_q_info {
86 u16 fw_rx_qid;
87 u16 fw_tx_qid;
88 u8 fw_cid;
89 u8 rxq_active;
90 u8 txq_active;
91};
92
Yuval Mintz32a47e72016-05-11 16:36:12 +030093enum vf_state {
Yuval Mintz1408cc1f2016-05-11 16:36:14 +030094 VF_FREE = 0, /* VF ready to be acquired holds no resc */
95 VF_ACQUIRED, /* VF, acquired, but not initalized */
Yuval Mintzdacd88d2016-05-11 16:36:16 +030096 VF_ENABLED, /* VF, Enabled */
Yuval Mintz0b55e272016-05-11 16:36:15 +030097 VF_RESET, /* VF, FLR'd, pending cleanup */
Yuval Mintz32a47e72016-05-11 16:36:12 +030098 VF_STOPPED /* VF, Stopped */
99};
100
/* PFs maintain an array of this structure, per VF */
struct qed_vf_info {
	struct qed_iov_vf_mbx vf_mbx;	/* PF<->VF message mailbox */
	enum vf_state state;		/* current lifecycle state */
	bool b_init;
	u8 to_disable;

	struct qed_bulletin bulletin;	/* PF-side bulletin board */
	dma_addr_t vf_bulletin;		/* DMA address of the bulletin */

	u32 concrete_fid;
	u16 opaque_fid;
	u16 mtu;

	u8 vport_id;
	u8 relative_vf_id;		/* VF index relative to this PF */
	u8 abs_vf_id;
/* Absolute VF id; on the second engine abs ids are offset by MAX_NUM_VFS_BB */
#define QED_VF_ABS_ID(p_hwfn, p_vf)	(QED_PATH_ID(p_hwfn) ?		      \
					 (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
					 (p_vf)->abs_vf_id)

	u8 vport_instance;
	u8 num_rxqs;
	u8 num_txqs;

	u8 num_sbs;

	u8 num_mac_filters;
	u8 num_vlan_filters;
	struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
	u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
	u8 num_active_rxqs;
	struct qed_public_vf_info p_vf_info;
};
135
/* This structure is part of qed_hwfn and used only for PFs that have sriov
 * capability enabled.
 */
struct qed_pf_iov {
	struct qed_vf_info vfs_array[MAX_NUM_VFS];	/* per-VF state */

	/* NOTE(review): presumably one bit per VF - confirm against users */
	u64 pending_events[QED_VF_ARRAY_LENGTH];
	u64 pending_flr[QED_VF_ARRAY_LENGTH];

	/* Allocate message address continuously and split to each VF */
	void *mbx_msg_virt_addr;
	dma_addr_t mbx_msg_phys_addr;
	u32 mbx_msg_size;
	void *mbx_reply_virt_addr;
	dma_addr_t mbx_reply_phys_addr;
	u32 mbx_reply_size;
	void *p_bulletins;
	dma_addr_t bulletins_phys;
	u32 bulletins_size;
};
155
Yuval Mintz37bff2b2016-05-11 16:36:13 +0300156enum qed_iov_wq_flag {
157 QED_IOV_WQ_MSG_FLAG,
158 QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
159 QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
160 QED_IOV_WQ_STOP_WQ_FLAG,
161 QED_IOV_WQ_FLR_FLAG,
162};
163
Yuval Mintz32a47e72016-05-11 16:36:12 +0300164#ifdef CONFIG_QED_SRIOV
165/**
166 * @brief - Given a VF index, return index of next [including that] active VF.
167 *
168 * @param p_hwfn
169 * @param rel_vf_id
170 *
171 * @return MAX_NUM_VFS in case no further active VFs, otherwise index.
172 */
173u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
174
175/**
176 * @brief Read sriov related information and allocated resources
177 * reads from configuraiton space, shmem, etc.
178 *
179 * @param p_hwfn
180 *
181 * @return int
182 */
183int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
184
185/**
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300186 * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
187 *
188 * @param p_hwfn
189 * @param p_iov
190 * @param type
191 * @param length
192 *
193 * @return pointer to the newly placed tlv
194 */
195void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
196
197/**
198 * @brief list the types and lengths of the tlvs on the buffer
199 *
200 * @param p_hwfn
201 * @param tlvs_list
202 */
203void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
204
205/**
Yuval Mintz32a47e72016-05-11 16:36:12 +0300206 * @brief qed_iov_alloc - allocate sriov related resources
207 *
208 * @param p_hwfn
209 *
210 * @return int
211 */
212int qed_iov_alloc(struct qed_hwfn *p_hwfn);
213
214/**
215 * @brief qed_iov_setup - setup sriov related resources
216 *
217 * @param p_hwfn
218 * @param p_ptt
219 */
220void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
221
222/**
223 * @brief qed_iov_free - free sriov related resources
224 *
225 * @param p_hwfn
226 */
227void qed_iov_free(struct qed_hwfn *p_hwfn);
228
229/**
230 * @brief free sriov related memory that was allocated during hw_prepare
231 *
232 * @param cdev
233 */
234void qed_iov_free_hw_info(struct qed_dev *cdev);
Yuval Mintz37bff2b2016-05-11 16:36:13 +0300235
236/**
237 * @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe.
238 *
239 * @param p_hwfn
240 * @param opcode
241 * @param echo
242 * @param data
243 */
244int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
245 u8 opcode, __le16 echo, union event_ring_data *data);
246
Yuval Mintz0b55e272016-05-11 16:36:15 +0300247/**
248 * @brief Mark structs of vfs that have been FLR-ed.
249 *
250 * @param p_hwfn
251 * @param disabled_vfs - bitmask of all VFs on path that were FLRed
252 *
253 * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
254 */
255int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
256
Yuval Mintzdacd88d2016-05-11 16:36:16 +0300257/**
258 * @brief Search extended TLVs in request/reply buffer.
259 *
260 * @param p_hwfn
261 * @param p_tlvs_list - Pointer to tlvs list
262 * @param req_type - Type of TLV
263 *
264 * @return pointer to tlv type if found, otherwise returns NULL.
265 */
266void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
267 void *p_tlvs_list, u16 req_type);
268
Yuval Mintz37bff2b2016-05-11 16:36:13 +0300269void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
270int qed_iov_wq_start(struct qed_dev *cdev);
271
272void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300273void qed_vf_start_iov_wq(struct qed_dev *cdev);
Yuval Mintz0b55e272016-05-11 16:36:15 +0300274int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
Yuval Mintz36558c32016-05-11 16:36:17 +0300275void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
Yuval Mintz32a47e72016-05-11 16:36:12 +0300276#else
277static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
278 u16 rel_vf_id)
279{
280 return MAX_NUM_VFS;
281}
282
283static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
284{
285 return 0;
286}
287
288static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
289{
290 return 0;
291}
292
293static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
294{
295}
296
297static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
298{
299}
300
301static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
302{
303}
Yuval Mintz37bff2b2016-05-11 16:36:13 +0300304
305static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
306 u8 opcode,
307 __le16 echo, union event_ring_data *data)
308{
309 return -EINVAL;
310}
311
Yuval Mintz0b55e272016-05-11 16:36:15 +0300312static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
313 u32 *disabled_vfs)
314{
315 return 0;
316}
317
Yuval Mintz37bff2b2016-05-11 16:36:13 +0300318static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
319{
320}
321
322static inline int qed_iov_wq_start(struct qed_dev *cdev)
323{
324 return 0;
325}
326
327static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
328 enum qed_iov_wq_flag flag)
329{
330}
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300331
332static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
333{
334}
Yuval Mintz0b55e272016-05-11 16:36:15 +0300335
336static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
337{
338 return 0;
339}
Yuval Mintz36558c32016-05-11 16:36:17 +0300340
341static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
342{
343}
Yuval Mintz32a47e72016-05-11 16:36:12 +0300344#endif
345
/* Iterate '_i' over the indices of all active VFs of '_p_hwfn'; the loop
 * ends when qed_iov_get_next_active_vf() reports MAX_NUM_VFS.
 */
#define qed_for_each_vf(_p_hwfn, _i)			  \
	for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
	     _i < MAX_NUM_VFS;				  \
	     _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
350
351#endif