/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#ifndef _QED_L2_H
#define _QED_L2_H
#include <linux/types.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/qed/qed_eth_if.h>
#include "qed.h"
#include "qed_hw.h"
#include "qed_sp.h"

struct qed_sge_tpa_params {
	u8 max_buffers_per_cqe;

	u8 update_tpa_en_flg;
	u8 tpa_ipv4_en_flg;
	u8 tpa_ipv6_en_flg;
	u8 tpa_ipv4_tunn_en_flg;
	u8 tpa_ipv6_tunn_en_flg;

	u8 update_tpa_param_flg;
	u8 tpa_pkt_split_flg;
	u8 tpa_hdr_data_split_flg;
	u8 tpa_gro_consistent_flg;
	u8 tpa_max_aggs_num;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;
};

enum qed_filter_opcode {
	QED_FILTER_ADD,
	QED_FILTER_REMOVE,
	QED_FILTER_MOVE,
	QED_FILTER_REPLACE,	/* Delete all MACs and add new one instead */
	QED_FILTER_FLUSH,	/* Removes all filters */
};

enum qed_filter_ucast_type {
	QED_FILTER_MAC,
	QED_FILTER_VLAN,
	QED_FILTER_MAC_VLAN,
	QED_FILTER_INNER_MAC,
	QED_FILTER_INNER_VLAN,
	QED_FILTER_INNER_PAIR,
	QED_FILTER_INNER_MAC_VNI_PAIR,
	QED_FILTER_MAC_VNI_PAIR,
	QED_FILTER_VNI,
};

struct qed_filter_ucast {
	enum qed_filter_opcode opcode;
	enum qed_filter_ucast_type type;
	u8 is_rx_filter;
	u8 is_tx_filter;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	unsigned char mac[ETH_ALEN];
	u8 assert_on_error;
	u16 vlan;
	u32 vni;
};

struct qed_filter_mcast {
	/* MOVE is not supported for multicast */
	enum qed_filter_opcode opcode;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	u8 num_mc_addrs;
#define QED_MAX_MC_ADDRS	64
	unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
};

int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
			     u16 rx_queue_id,
			     bool eq_completion_only, bool cqe_completion);

int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id);

enum qed_tpa_mode {
	QED_TPA_MODE_NONE,
	QED_TPA_MODE_UNUSED,
	QED_TPA_MODE_GRO,
	QED_TPA_MODE_MAX
};

struct qed_sp_vport_start_params {
	enum qed_tpa_mode tpa_mode;
	bool remove_inner_vlan;
	bool tx_switching;
	bool only_untagged;
	bool drop_ttl0;
	u8 max_buffers_per_cqe;
	u32 concrete_fid;
	u16 opaque_fid;
	u8 vport_id;
	u16 mtu;
};

int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params);
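
/* Illustrative sketch only, not part of the driver API: one plausible way to
 * fill qed_sp_vport_start_params before calling qed_sp_eth_vport_start().
 * The opaque_fid/concrete_fid values normally come from the hw-function
 * state; here they are assumed to be provided by the caller.
 *
 *	struct qed_sp_vport_start_params start = { 0 };
 *
 *	start.tpa_mode = QED_TPA_MODE_GRO;
 *	start.remove_inner_vlan = true;
 *	start.drop_ttl0 = true;
 *	start.max_buffers_per_cqe = 5;
 *	start.mtu = 1500;
 *	start.vport_id = 0;
 *	start.opaque_fid = opaque_fid;
 *	start.concrete_fid = concrete_fid;
 *
 *	rc = qed_sp_eth_vport_start(p_hwfn, &start);
 */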

struct qed_rss_params {
	u8 update_rss_config;
	u8 rss_enable;
	u8 rss_eng_id;
	u8 update_rss_capabilities;
	u8 update_rss_ind_table;
	u8 update_rss_key;
	u8 rss_caps;
	u8 rss_table_size_log;
	u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
	u32 rss_key[QED_RSS_KEY_SIZE];
};

struct qed_filter_accept_flags {
	u8 update_rx_mode_config;
	u8 update_tx_mode_config;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
#define QED_ACCEPT_NONE			0x01
#define QED_ACCEPT_UCAST_MATCHED	0x02
#define QED_ACCEPT_UCAST_UNMATCHED	0x04
#define QED_ACCEPT_MCAST_MATCHED	0x08
#define QED_ACCEPT_MCAST_UNMATCHED	0x10
#define QED_ACCEPT_BCAST		0x20
};
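
/* Illustrative sketch only: a caller that wants promiscuous Rx behaviour
 * would typically set the bits below and hand the structure to
 * qed_sp_vport_update() through the accept_flags member of
 * qed_sp_vport_update_params:
 *
 *	struct qed_filter_accept_flags flags = { 0 };
 *
 *	flags.update_rx_mode_config = 1;
 *	flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
 *				 QED_ACCEPT_UCAST_UNMATCHED |
 *				 QED_ACCEPT_MCAST_MATCHED |
 *				 QED_ACCEPT_MCAST_UNMATCHED |
 *				 QED_ACCEPT_BCAST;
 */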

struct qed_sp_vport_update_params {
	u16 opaque_fid;
	u8 vport_id;
	u8 update_vport_active_rx_flg;
	u8 vport_active_rx_flg;
	u8 update_vport_active_tx_flg;
	u8 vport_active_tx_flg;
	u8 update_inner_vlan_removal_flg;
	u8 inner_vlan_removal_flg;
	u8 silent_vlan_removal_flg;
	u8 update_default_vlan_enable_flg;
	u8 default_vlan_enable_flg;
	u8 update_default_vlan_flg;
	u16 default_vlan;
	u8 update_tx_switching_flg;
	u8 tx_switching_flg;
	u8 update_approx_mcast_flg;
	u8 update_anti_spoofing_en_flg;
	u8 anti_spoofing_en;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;
	unsigned long bins[8];
	struct qed_rss_params *rss_params;
	struct qed_filter_accept_flags accept_flags;
	struct qed_sge_tpa_params *sge_tpa_params;
};

int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data);
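
/* Illustrative sketch only, with assumed values: enabling the Rx/Tx data
 * path of vport 0 through qed_sp_vport_update(). QED_SPQ_MODE_EBLOCK is
 * assumed to come from qed_sp.h, and opaque_fid is assumed to be provided
 * by the caller; neither is defined in this header.
 *
 *	struct qed_sp_vport_update_params update = { 0 };
 *
 *	update.opaque_fid = opaque_fid;
 *	update.vport_id = 0;
 *	update.update_vport_active_rx_flg = 1;
 *	update.vport_active_rx_flg = 1;
 *	update.update_vport_active_tx_flg = 1;
 *	update.vport_active_tx_flg = 1;
 *
 *	rc = qed_sp_vport_update(p_hwfn, &update,
 *				 QED_SPQ_MODE_EBLOCK, NULL);
 */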

/**
 * @brief qed_sp_vport_stop -
 *
 * This ramrod closes a VPort after all its RX and TX queues are terminated.
 * An assertion is generated if any queues are left open.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param vport_id		VPort ID
 *
 * @return int
 */
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);

int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct qed_filter_ucast *p_filter_cmd,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data);
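
/* Illustrative sketch only, with assumed values: adding a unicast MAC filter
 * on both the Rx and Tx side of vport 0 and blocking until the ramrod
 * completes. QED_SPQ_MODE_EBLOCK (assumed from qed_sp.h), ether_addr_copy()
 * (from <linux/etherdevice.h>) and the caller-provided opaque_fid/new_mac
 * are assumptions, not guarantees of this header.
 *
 *	struct qed_filter_ucast ucast = { 0 };
 *
 *	ucast.opcode = QED_FILTER_ADD;
 *	ucast.type = QED_FILTER_MAC;
 *	ucast.is_rx_filter = 1;
 *	ucast.is_tx_filter = 1;
 *	ucast.vport_to_add_to = 0;
 *	ether_addr_copy(ucast.mac, new_mac);
 *
 *	rc = qed_sp_eth_filter_ucast(p_hwfn, opaque_fid, &ucast,
 *				     QED_SPQ_MODE_EBLOCK, NULL);
 */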

/**
 * @brief qed_sp_eth_rx_queues_update -
 *
 * This ramrod updates an RX queue. It is used for setting the active state
 * of the queue and updating the TPA and SGE parameters.
 *
 * @note At the moment this is only used by non-Linux VFs.
 *
 * @param p_hwfn
 * @param rx_queue_id		RX Queue ID
 * @param num_rxqs		Allows updating multiple RX
 *				queues, from rx_queue_id to
 *				(rx_queue_id + num_rxqs)
 * @param complete_cqe_flg	Post completion to the CQE Ring if set
 * @param complete_event_flg	Post completion to the Event Ring if set
 *
 * @return int
 */
int
qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
			    u16 rx_queue_id,
			    u8 num_rxqs,
			    u8 complete_cqe_flg,
			    u8 complete_event_flg,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data);
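
/* Illustrative sketch only, with assumed values: requesting a CQE-ring
 * completion for two consecutive Rx queues starting at queue 0.
 * QED_SPQ_MODE_EBLOCK is assumed to come from qed_sp.h.
 *
 *	rc = qed_sp_eth_rx_queues_update(p_hwfn, 0, 2, 1, 0,
 *					 QED_SPQ_MODE_EBLOCK, NULL);
 */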

int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params);

int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
				u16 opaque_fid,
				u32 cid,
				struct qed_queue_start_common_params *params,
				u8 stats_id,
				u16 bd_max_bytes,
				dma_addr_t bd_chain_phys_addr,
				dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);

int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
				u16 opaque_fid,
				u32 cid,
				struct qed_queue_start_common_params *p_params,
				u8 stats_id,
				dma_addr_t pbl_addr,
				u16 pbl_size,
				union qed_qm_pq_params *p_pq_params);

u8 qed_mcast_bin_from_mac(u8 *mac);

#endif /* _QED_L2_H */