blob: ab5549f4e5ea2567ed027a0a6b68f42804aeb1dd [file] [log] [blame]
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8
9#ifndef _QED_SP_H
10#define _QED_SP_H
11
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <linux/list.h>
15#include <linux/slab.h>
16#include <linux/spinlock.h>
17#include <linux/qed/qed_chain.h>
18#include "qed.h"
19#include "qed_hsi.h"
20
/* Completion mode of a slowpath request - selects how the requester is
 * notified when the ramrod completes.
 */
enum spq_mode {
	QED_SPQ_MODE_BLOCK,   /* Client will poll a designated mem. address */
	QED_SPQ_MODE_CB,      /* Client supplies a callback */
	QED_SPQ_MODE_EBLOCK,  /* QED should block until completion */
};
26
27struct qed_spq_comp_cb {
28 void (*function)(struct qed_hwfn *,
29 void *,
30 union event_ring_data *,
31 u8 fw_return_code);
32 void *cookie;
33};
34
/**
 * @brief qed_eth_cqe_completion - handles the completion of a
 *        ramrod on the cqe ring
 *
 * @param p_hwfn
 * @param cqe - the slowpath RX completion element
 *
 * @return int
 */
int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe);
46
47/**
48 * @file
49 *
50 * QED Slow-hwfn queue interface
51 */
52
/* Union of all ramrod-data payloads that can accompany a slowpath
 * element; the active member is selected by the posted ramrod command.
 */
union ramrod_data {
	struct pf_start_ramrod_data pf_start;
	struct pf_update_ramrod_data pf_update;
	struct rx_queue_start_ramrod_data rx_queue_start;
	struct rx_queue_update_ramrod_data rx_queue_update;
	struct rx_queue_stop_ramrod_data rx_queue_stop;
	struct tx_queue_start_ramrod_data tx_queue_start;
	struct tx_queue_stop_ramrod_data tx_queue_stop;
	struct vport_start_ramrod_data vport_start;
	struct vport_stop_ramrod_data vport_stop;
	struct vport_update_ramrod_data vport_update;
	struct vport_filter_update_ramrod_data vport_filter_update;

	/* SR-IOV VF lifecycle ramrods */
	struct vf_start_ramrod_data vf_start;
	struct vf_stop_ramrod_data vf_stop;
};
69
/* NOTE(review): maximum EQ credit value - confirm usage in qed_spq.c */
#define EQ_MAX_CREDIT	0xffffffff

/* Posting priority of an SPQ entry (see qed_spq_entry.priority). */
enum spq_priority {
	QED_SPQ_PRIORITY_NORMAL,
	QED_SPQ_PRIORITY_HIGH,
};
76
/* Completion notification for a slowpath request: either a callback
 * descriptor or the address of a 'done' word the client polls.
 */
union qed_spq_req_comp {
	struct qed_spq_comp_cb cb;
	u64 *done_addr;
};

/* Completion record written for an EBLOCK-mode request
 * (see qed_spq_entry.comp_done).
 */
struct qed_spq_comp_done {
	u64 done;
	u8 fw_return_code;
};
86
/* A single slowpath request. Entries live on one of the qed_spq lists
 * and carry the HSI slowpath element together with its ramrod data and
 * completion bookkeeping.
 */
struct qed_spq_entry {
	struct list_head list;

	u8 flags;

	/* HSI slow path element */
	struct slow_path_element elem;

	/* Payload for the ramrod this entry posts */
	union ramrod_data ramrod;

	enum spq_priority priority;

	/* pending queue for this entry */
	struct list_head *queue;

	/* How/whom to notify upon completion */
	enum spq_mode comp_mode;
	struct qed_spq_comp_cb comp_cb;
	struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
};
106
/* Event queue (EQ) state; see the qed_eq_* API below. */
struct qed_eq {
	struct qed_chain chain;
	u8 eq_sb_index;		/* index within the SB */
	__le16 *p_fw_cons;	/* ptr to index value */
};

/* Consumer queue (ConsQ) state; see the qed_consq_* API below. */
struct qed_consq {
	struct qed_chain chain;
};
116
/* Slowpath queue state for a hwfn: the pending/free entry lists, the
 * chain the elements are posted on, and completion bookkeeping.
 */
struct qed_spq {
	spinlock_t lock; /* SPQ lock */

	/* Entry lists, by state */
	struct list_head unlimited_pending;
	struct list_head pending;
	struct list_head completion_pending;
	struct list_head free_pool;

	struct qed_chain chain;

	/* allocated dma-able memory for spq entries (+ramrod data) */
	dma_addr_t p_phys;
	struct qed_spq_entry *p_virt;

#define SPQ_RING_SIZE \
	(CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))

	/* Bitmap for handling out-of-order completions */
	DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE);
	u8 comp_bitmap_idx;

	/* Statistics */
	u32 unlimited_pending_count;
	u32 normal_count;
	u32 high_count;
	u32 comp_sent_count;
	u32 comp_count;

	u32 cid;
};
147
/**
 * @brief qed_spq_post - Posts a Slow hwfn request to FW, or lacking that
 *        Pends it to the future list.
 *
 * @param p_hwfn
 * @param p_ent - the slowpath entry to post
 * @param fw_return_code - out-param for the FW return code
 *
 * @return int
 */
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent,
		 u8 *fw_return_code);
160
/**
 * @brief qed_spq_alloc - Allocates & initializes the SPQ and EQ.
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_spq_alloc(struct qed_hwfn *p_hwfn);
169
/**
 * @brief qed_spq_setup - Reset the SPQ to its start state.
 *
 * @param p_hwfn
 */
void qed_spq_setup(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_spq_free - Deallocates the given SPQ struct.
 *
 * @param p_hwfn
 */
void qed_spq_free(struct qed_hwfn *p_hwfn);
183
/**
 * @brief qed_spq_get_entry - Obtain an entry from the spq
 *        free pool list.
 *
 * @param p_hwfn
 * @param pp_ent - filled with the obtained entry
 *
 * @return int
 */
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry **pp_ent);

/**
 * @brief qed_spq_return_entry - Return an entry to spq free
 *        pool list
 *
 * @param p_hwfn
 * @param p_ent
 */
void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
			  struct qed_spq_entry *p_ent);
/**
 * @brief qed_eq_alloc - Allocates & initializes an EQ struct
 *
 * @param p_hwfn
 * @param num_elem - number of elements in the eq
 *
 * @return struct qed_eq* - a newly allocated structure; NULL upon error.
 */
struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
			    u16 num_elem);
218
/**
 * @brief qed_eq_setup - Reset the EQ to its start state.
 *
 * @param p_hwfn
 * @param p_eq
 */
void qed_eq_setup(struct qed_hwfn *p_hwfn,
		  struct qed_eq *p_eq);

/**
 * @brief qed_eq_free - deallocates the given EQ struct.
 *
 * @param p_hwfn
 * @param p_eq
 */
void qed_eq_free(struct qed_hwfn *p_hwfn,
		 struct qed_eq *p_eq);
236
/**
 * @brief qed_eq_prod_update - update the FW with default EQ producer
 *
 * @param p_hwfn
 * @param prod - new producer value
 */
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
			u16 prod);

/**
 * @brief qed_eq_completion - Completes currently pending EQ elements
 *
 * @param p_hwfn
 * @param cookie - opaque context passed through to completion handling
 *
 * @return int
 */
int qed_eq_completion(struct qed_hwfn *p_hwfn,
		      void *cookie);
256
/**
 * @brief qed_spq_completion - Completes a single event
 *
 * @param p_hwfn
 * @param echo - echo value from cookie (used for determining completion)
 * @param fw_return_code - FW return code of the completed ramrod
 * @param p_data - data from cookie (used in callback function if applicable)
 *
 * @return int
 */
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data);
270
/**
 * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
 *
 * @param p_hwfn
 *
 * @return u32 - SPQ CID
 */
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
279
/**
 * @brief qed_consq_alloc - Allocates & initializes a ConsQ
 *        struct
 *
 * @param p_hwfn
 *
 * @return struct qed_consq* - a newly allocated structure; NULL upon error.
 */
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_consq_setup - Reset the ConsQ to its start
 *        state.
 *
 * @param p_hwfn
 * @param p_consq
 */
void qed_consq_setup(struct qed_hwfn *p_hwfn,
		     struct qed_consq *p_consq);

/**
 * @brief qed_consq_free - deallocates the given ConsQ struct.
 *
 * @param p_hwfn
 * @param p_consq
 */
void qed_consq_free(struct qed_hwfn *p_hwfn,
		    struct qed_consq *p_consq);
308
/**
 * @file
 *
 * @brief Slow-hwfn low-level commands (Ramrods) function definitions.
 */

/* Completion-request flags - NOTE(review): presumably used in
 * qed_spq_entry.flags; confirm against qed_spq.c.
 */
#define QED_SP_EQ_COMPLETION  0x01
#define QED_SP_CQE_COMPLETION 0x02
317
/* Parameters for initializing a slowpath request via qed_sp_init_request(). */
struct qed_sp_init_data {
	u32 cid;
	u16 opaque_fid;

	/* Information regarding operation upon sending & completion */
	enum spq_mode comp_mode;
	struct qed_spq_comp_cb *p_comp_data;
};
326
/**
 * @brief qed_sp_init_request - Acquire and initialize an SPQ entry
 *        for the given ramrod command.
 *
 * @param p_hwfn
 * @param pp_ent - filled with the acquired entry
 * @param cmd - ramrod command id
 * @param protocol - protocol id of the ramrod
 * @param p_data - initialization parameters
 *
 * @return int
 */
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
			struct qed_spq_entry **pp_ent,
			u8 cmd,
			u8 protocol,
			struct qed_sp_init_data *p_data);
/**
 * @brief qed_sp_pf_start - PF Function Start Ramrod
 *
 * This ramrod is sent to initialize a physical function (PF). It will
 * configure the function related parameters and write its completion to the
 * event ring specified in the parameters.
 *
 * Ramrods complete on the common event ring for the PF. This ring is
 * allocated by the driver on host memory and its parameters are written
 * to the internal RAM of the UStorm by the Function Start Ramrod.
 *
 * @param p_hwfn
 * @param p_tunn - tunnel start parameters
 * @param mode - multi-function mode (enum qed_mf_mode)
 * @param allow_npar_tx_switch
 *
 * @return int
 */

int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    struct qed_tunn_start_params *p_tunn,
		    enum qed_mf_mode mode, bool allow_npar_tx_switch);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200355
/**
 * @brief qed_sp_pf_stop - PF Function Stop Ramrod
 *
 * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
 * sent and the last completion written to the PFs Event Ring. This ramrod also
 * deletes the context for the Slowhwfn connection on this PF.
 *
 * @note Not required for first packet.
 *
 * @param p_hwfn
 *
 * @return int
 */

int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
371
/**
 * @brief qed_sp_pf_update_tunn_cfg - PF tunnel configuration update Ramrod
 *
 * @param p_hwfn
 * @param p_tunn - tunnel update parameters
 * @param comp_mode - completion mode of the request
 * @param p_comp_data - completion callback data (if applicable)
 *
 * @return int
 */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
			      struct qed_tunn_update_params *p_tunn,
			      enum spq_mode comp_mode,
			      struct qed_spq_comp_cb *p_comp_data);
/**
 * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod
 *
 * @param p_hwfn
 *
 * @return int
 */

int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);
385
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200386#endif