blob: 49d452e9117457b37da97c93359f9eb5aee6a254 [file] [log] [blame]
Ariel Elior1ab44342013-01-01 05:22:23 +00001/* bnx2x_sriov.h: Broadcom Everest network driver.
2 *
3 * Copyright 2009-2012 Broadcom Corporation
4 *
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
17 * Ariel Elior <ariele@broadcom.com>
18 */
19#ifndef BNX2X_SRIOV_H
20#define BNX2X_SRIOV_H
21
Ariel Elior8ca5e172013-01-01 05:22:34 +000022#include "bnx2x_vfpf.h"
23#include "bnx2x_cmn.h"
24
Ariel Elior290ca2b2013-01-01 05:22:31 +000025/* The bnx2x device structure holds vfdb structure described below.
26 * The VF array is indexed by the relative vfid.
27 */
Ariel Elior8ca5e172013-01-01 05:22:34 +000028#define BNX2X_VF_MAX_QUEUES 16
Ariel Elior8db573b2013-01-01 05:22:37 +000029#define BNX2X_VF_MAX_TPA_AGG_QUEUES 8
30
/* PF-side SR-IOV bookkeeping. Most fields mirror the device's standard
 * PCI SR-IOV capability and are kept mostly for debugging.
 */
struct bnx2x_sriov {
	u32 first_vf_in_pf;	/* absolute id of this PF's first VF */

	/* standard SRIOV capability fields, mostly for debugging */
	int pos;		/* capability position */
	int nres;		/* number of resources */
	u32 cap;		/* SR-IOV Capabilities */
	u16 ctrl;		/* SR-IOV Control */
	u16 total;		/* total VFs associated with the PF */
	u16 initial;		/* initial VFs associated with the PF */
	u16 nr_virtfn;		/* number of VFs available */
	u16 offset;		/* first VF Routing ID offset */
	u16 stride;		/* following VF stride */
	u32 pgsz;		/* page size for BAR alignment */
	u8 link;		/* Function Dependency Link */
};
47
48/* bars */
/* A single VF PCI BAR: base address and size */
struct bnx2x_vf_bar {
	u64 bar;	/* BAR base address */
	u32 size;	/* BAR size */
};
53
54/* vf queue (used both for rx or tx) */
/* vf queue (used both for rx or tx) */
struct bnx2x_vf_queue {
	struct eth_context		*cxt;	/* HW queue context */

	/* MACs object */
	struct bnx2x_vlan_mac_obj	mac_obj;

	/* VLANs object */
	struct bnx2x_vlan_mac_obj	vlan_obj;
	atomic_t vlan_count;		/* 0 means vlan-0 is set ~ untagged */

	/* Queue Slow-path State object */
	struct bnx2x_queue_sp_obj	sp_obj;

	u32 cid;	/* connection id - NOTE(review): presumed, confirm vs. usage */
	u16 index;	/* queue index within the VF (0 == leading queue) */
	u16 sb_idx;	/* status block index serving this queue */
};
72
73/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
74 * q-init, q-setup and SB index
75 */
/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
 * q-init, q-setup and SB index
 */
struct bnx2x_vfop_qctor_params {
	struct bnx2x_queue_state_params		qstate;		/* q-init state transition */
	struct bnx2x_queue_setup_params		prep_qsetup;	/* prepared q-setup ramrod params */
};
80
81/* VFOP parameters (one copy per VF) */
/* VFOP parameters (one copy per VF). A single instance suffices because
 * only one vf operation executes at a time (serialized by op_mutex).
 */
union bnx2x_vfop_params {
	struct bnx2x_vlan_mac_ramrod_params	vlan_mac;
	struct bnx2x_rx_mode_ramrod_params	rx_mode;
	struct bnx2x_mcast_ramrod_params	mcast;
	struct bnx2x_config_rss_params		rss;
	struct bnx2x_vfop_qctor_params		qctor;
};
89
90/* forward */
91struct bnx2x_virtf;
Ariel Eliorfd1fc792013-01-01 05:22:33 +000092
93/* VFOP definitions */
94typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf);
95
/* VFOP command: how to complete a queued vf operation */
struct bnx2x_vfop_cmd {
	vfop_handler_t done;	/* callback invoked when the op completes */
	bool block;		/* true - wait (block) for the op to finish */
};
100
Ariel Eliorfd1fc792013-01-01 05:22:33 +0000101/* VFOP queue filters command additional arguments */
/* VFOP queue filters command additional arguments */
struct bnx2x_vfop_filter {
	struct list_head link;	/* entry in bnx2x_vfop_filters.head */
	int type;
#define BNX2X_VFOP_FILTER_MAC	1
#define BNX2X_VFOP_FILTER_VLAN	2

	bool add;	/* true - add the filter, false - remove it */
	u8 *mac;	/* MAC address - meaningful for FILTER_MAC */
	u16 vid;	/* VLAN id - meaningful for FILTER_VLAN */
};
112
/* A batch of queue filters with a count of the 'add' requests among them */
struct bnx2x_vfop_filters {
	int add_cnt;		/* number of entries with add == true */
	struct list_head head;	/* list of the filters below */
	struct bnx2x_vfop_filter filters[];	/* flexible array of entries */
};
118
119/* transient list allocated, built and saved until its
120 * passed to the SP-VERBs layer.
121 */
/* transient list allocated, built and saved until its
 * passed to the SP-VERBs layer.
 */
struct bnx2x_vfop_args_mcast {
	int mc_num;				/* number of multicast entries */
	struct bnx2x_mcast_list_elem *mc;	/* multicast list */
};
126
/* queue construction arguments */
struct bnx2x_vfop_args_qctor {
	int	qid;	/* VF-relative queue id */
	u16	sb_idx;	/* status block index for the queue */
};
131
/* queue destruction arguments */
struct bnx2x_vfop_args_qdtor {
	int qid;			/* VF-relative queue id */
	struct eth_context *cxt;	/* HW context to release */
};
136
/* default-vlan configuration arguments */
struct bnx2x_vfop_args_defvlan {
	int	qid;	/* VF-relative queue id */
	bool	enable;	/* enable/disable the default vlan */
	u16	vid;	/* VLAN id */
	u8	prio;	/* VLAN priority */
};
143
/* generic per-queue enable/add toggle arguments */
struct bnx2x_vfop_args_qx {
	int	qid;	/* VF-relative queue id */
	bool	en_add;	/* enable/add vs. disable/remove */
};
148
/* filters command arguments */
struct bnx2x_vfop_args_filters {
	struct bnx2x_vfop_filters *multi_filter;	/* batch of filters to apply */
	atomic_t *credit;	/* non NULL means 'don't consume credit' */
};
153
/* per-op scratch arguments; which member is valid depends on the active op */
union bnx2x_vfop_args {
	struct bnx2x_vfop_args_mcast	mc_list;
	struct bnx2x_vfop_args_qctor	qctor;
	struct bnx2x_vfop_args_qdtor	qdtor;
	struct bnx2x_vfop_args_defvlan	defvlan;
	struct bnx2x_vfop_args_qx	qx;
	struct bnx2x_vfop_args_filters	filters;
};
162
/* A single VF operation (state machine instance). Ops nest: each op is an
 * entry in the VF's op_list_head; the head is the currently executing op.
 */
struct bnx2x_vfop {
	struct list_head link;		/* entry in vf->op_list_head */
	int			rc;	/* return code */
	int			state;	/* next state */
	union bnx2x_vfop_args	args;	/* extra arguments */
	union bnx2x_vfop_params *op_p;	/* ramrod params */

	/* state machine callbacks */
	vfop_handler_t transition;	/* advances the op to its next state */
	vfop_handler_t done;		/* invoked on op completion */
};
174
/* vf context - the PF's per-VF state */
struct bnx2x_virtf {
	u16 cfg_flags;
#define VF_CFG_STATS		0x0001
#define VF_CFG_FW_FC		0x0002
#define VF_CFG_TPA		0x0004
#define VF_CFG_INT_SIMD		0x0008
#define VF_CACHE_LINE		0x0010

	u8 state;
#define VF_FREE		0	/* VF ready to be acquired holds no resc */
#define VF_ACQUIRED	1	/* VF acquired, but not initialized */
#define VF_ENABLED	2	/* VF Enabled */
#define VF_RESET	3	/* VF FLR'd, pending cleanup */

	/* non 0 during flr cleanup */
	u8 flr_clnup_stage;
#define VF_FLR_CLN	1	/* reclaim resources and do 'final cleanup'
				 * sans the end-wait
				 */
#define VF_FLR_ACK	2	/* ACK flr notification */
#define VF_FLR_EPILOG	3	/* wait for VF remnants to dissipate in the HW
				 * ~ final cleanup' end wait
				 */

	/* dma */
	dma_addr_t fw_stat_map;	/* valid iff VF_CFG_STATS */
	dma_addr_t spq_map;	/* slow-path queue */
	dma_addr_t bulletin_map;

	/* Allocated resources counters. Before the VF is acquired, the
	 * counters hold the following values:
	 *
	 * - xxq_count = 0 as the queues memory is not allocated yet.
	 *
	 * - sb_count  = The number of status blocks configured for this VF in
	 *		 the IGU CAM. Initially read during probe.
	 *
	 * - xx_rules_count = The number of rules statically and equally
	 *		      allocated for each VF, during PF load.
	 */
	struct vf_pf_resc_request	alloc_resc;
#define vf_rxq_count(vf)		((vf)->alloc_resc.num_rxqs)
#define vf_txq_count(vf)		((vf)->alloc_resc.num_txqs)
#define vf_sb_count(vf)			((vf)->alloc_resc.num_sbs)
#define vf_mac_rules_cnt(vf)		((vf)->alloc_resc.num_mac_filters)
#define vf_vlan_rules_cnt(vf)		((vf)->alloc_resc.num_vlan_filters)
#define vf_mc_rules_cnt(vf)		((vf)->alloc_resc.num_mc_filters)

	u8 sb_count;	/* actual number of SBs */
	u8 igu_base_id;	/* base igu status block id */

	struct bnx2x_vf_queue	*vfqs;	/* this VF's queues array */
#define bnx2x_vfq(vf, nr, var)	((vf)->vfqs[(nr)].var)

	u8 index;	/* index in the vf array */
	u8 abs_vfid;	/* device-absolute vf id */
	u8 sp_cl_id;	/* slow-path client id */
	u32 error;	/* 0 means all's-well */

	/* BDF */
	unsigned int bus;
	unsigned int devfn;

	/* bars */
	struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS];

	/* set-mac ramrod state 1-pending, 0-done */
	unsigned long	filter_state;

	/* leading rss client id ~~ the client id of the first rxq, must be
	 * set for each txq.
	 */
	int leading_rss;

	/* MCAST object */
	struct bnx2x_mcast_obj		mcast_obj;

	/* RSS configuration object */
	struct bnx2x_rss_config_obj     rss_conf_obj;

	/* slow-path operations */
	atomic_t		op_in_progress;	/* a ramrod is in flight */
	int			op_rc;		/* rc of the last completed op chain */
	bool			op_wait_blocking; /* blocking waiter present; cleared on op end */
	struct list_head	op_list_head;	/* stack of nested vfops; head = current */
	union bnx2x_vfop_params	op_params;	/* ramrod params of the current op */
	struct mutex		op_mutex;	/* one vfop at a time mutex */
	enum channel_tlvs	op_current;	/* tlv currently holding the vf-pf channel */
};
265
266#define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn)
267
268#define for_each_vf(bp, var) \
269 for ((var) = 0; (var) < BNX2X_NR_VIRTFN(bp); (var)++)
270
Ariel Elior8ca5e172013-01-01 05:22:34 +0000271#define for_each_vfq(vf, var) \
272 for ((var) = 0; (var) < vf_rxq_count(vf); (var)++)
273
274#define for_each_vf_sb(vf, var) \
275 for ((var) = 0; (var) < vf_sb_count(vf); (var)++)
276
Ariel Eliorb93288d2013-01-01 05:22:35 +0000277#define is_vf_multi(vf) (vf_rxq_count(vf) > 1)
278
Ariel Eliorb56e9672013-01-01 05:22:32 +0000279#define HW_VF_HANDLE(bp, abs_vfid) \
280 (u16)(BP_ABS_FUNC((bp)) | (1<<3) | ((u16)(abs_vfid) << 4))
281
282#define FW_PF_MAX_HANDLE 8
283
284#define FW_VF_HANDLE(abs_vfid) \
285 (abs_vfid + FW_PF_MAX_HANDLE)
286
Ariel Elior8ca5e172013-01-01 05:22:34 +0000287/* locking and unlocking the channel mutex */
288void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
289 enum channel_tlvs tlv);
290
291void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
292 enum channel_tlvs expected_tlv);
293
Ariel Eliorb56e9672013-01-01 05:22:32 +0000294/* VF mail box (aka vf-pf channel) */
295
296/* a container for the bi-directional vf<-->pf messages.
297 * The actual response will be placed according to the offset parameter
298 * provided in the request
299 */
300
301#define MBX_MSG_ALIGN 8
302#define MBX_MSG_ALIGNED_SIZE (roundup(sizeof(struct bnx2x_vf_mbx_msg), \
303 MBX_MSG_ALIGN))
304
/* request/response pair occupying one mailbox message buffer */
struct bnx2x_vf_mbx_msg {
	union vfpf_tlvs	req;	/* VF -> PF request TLVs */
	union pfvf_tlvs	resp;	/* PF -> VF response TLVs */
};
309
/* PF-side state of one VF's mailbox (vf-pf channel) */
struct bnx2x_vf_mbx {
	struct bnx2x_vf_mbx_msg *msg;	/* message buffer (CPU address) */
	dma_addr_t msg_mapping;		/* DMA mapping of the buffer */

	/* VF GPA address */
	u32 vf_addr_lo;
	u32 vf_addr_hi;

	struct vfpf_first_tlv first_tlv;	/* saved VF request header */

	u8 flags;
#define VF_MSG_INPROCESS	0x1	/* failsafe - the FW should prevent
					 * more then one pending msg
					 */
};
325
/* per-VF slow-path ramrod data, DMA-mapped (see sp_dma in bnx2x_vfdb and
 * the bnx2x_vf_sp/bnx2x_vf_sp_map accessors there)
 */
struct bnx2x_vf_sp {
	union {
		struct eth_classify_rules_ramrod_data	e2;
	} mac_rdata;

	union {
		struct eth_classify_rules_ramrod_data	e2;
	} vlan_rdata;

	union {
		struct eth_filter_rules_ramrod_data	e2;
	} rx_mode_rdata;

	union {
		struct eth_multicast_rules_ramrod_data  e2;
	} mcast_rdata;

	union {
		struct client_init_ramrod_data  init_data;
		struct client_update_ramrod_data update_data;
	} q_data;
};
348
/* a DMA-coherent memory chunk: CPU address, bus mapping and size */
struct hw_dma {
	void *addr;		/* CPU virtual address */
	dma_addr_t mapping;	/* DMA/bus address */
	size_t size;		/* chunk size */
};
354
/* The VF database held by the PF; one instance hangs off struct bnx2x */
struct bnx2x_vfdb {
#define BP_VFDB(bp)		((bp)->vfdb)
	/* vf array */
	struct bnx2x_virtf	*vfs;
#define BP_VF(bp, idx)		(&((bp)->vfdb->vfs[(idx)]))
#define bnx2x_vf(bp, idx, var)	((bp)->vfdb->vfs[(idx)].var)

	/* queue array - for all vfs */
	struct bnx2x_vf_queue *vfqs;

	/* vf HW contexts */
	struct hw_dma		context[BNX2X_VF_CIDS/ILT_PAGE_CIDS];
#define	BP_VF_CXT_PAGE(bp, i)	(&(bp)->vfdb->context[(i)])

	/* SR-IOV information */
	struct bnx2x_sriov	sriov;
	struct hw_dma		mbx_dma;	/* backing store for all VF mailboxes */
#define BP_VF_MBX_DMA(bp)	(&((bp)->vfdb->mbx_dma))
	struct bnx2x_vf_mbx	mbxs[BNX2X_MAX_NUM_OF_VFS];
#define BP_VF_MBX(bp, vfid)	(&((bp)->vfdb->mbxs[(vfid)]))

	struct hw_dma		sp_dma;	/* backing store for per-VF bnx2x_vf_sp */
	/* address/mapping of vf's bnx2x_vf_sp 'field' within sp_dma */
#define bnx2x_vf_sp(bp, vf, field) ((bp)->vfdb->sp_dma.addr +		\
		(vf)->index * sizeof(struct bnx2x_vf_sp) +		\
		offsetof(struct bnx2x_vf_sp, field))
#define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping +	\
		(vf)->index * sizeof(struct bnx2x_vf_sp) +		\
		offsetof(struct bnx2x_vf_sp, field))

	/* bitmap of FLR'd VFs, one bit per possible VF */
#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32)
	u32 flrd_vfs[FLRD_VFS_DWORDS];
};
387
Ariel Eliorfd1fc792013-01-01 05:22:33 +0000388/* queue access */
389static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
390{
391 return &(vf->vfqs[index]);
392}
393
Ariel Elior8ca5e172013-01-01 05:22:34 +0000394static inline bool vfq_is_leading(struct bnx2x_vf_queue *vfq)
395{
396 return (vfq->index == 0);
397}
398
399/* FW ids */
Ariel Eliorb56e9672013-01-01 05:22:32 +0000400static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
401{
402 return vf->igu_base_id + sb_idx;
403}
404
Ariel Elior8ca5e172013-01-01 05:22:34 +0000405static inline u8 vf_hc_qzone(struct bnx2x_virtf *vf, u16 sb_idx)
406{
407 return vf_igu_sb(vf, sb_idx);
408}
409
410static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
411{
412 return vf->igu_base_id + q->index;
413}
414
Ariel Elior8db573b2013-01-01 05:22:37 +0000415static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
416{
417 return vfq_cl_id(vf, q);
418}
419
Ariel Elior8ca5e172013-01-01 05:22:34 +0000420static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
421{
422 return vfq_cl_id(vf, q);
423}
424
Ariel Elior290ca2b2013-01-01 05:22:31 +0000425/* global iov routines */
426int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line);
427int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param);
428void bnx2x_iov_remove_one(struct bnx2x *bp);
Ariel Eliorb56e9672013-01-01 05:22:32 +0000429void bnx2x_iov_free_mem(struct bnx2x *bp);
430int bnx2x_iov_alloc_mem(struct bnx2x *bp);
431int bnx2x_iov_nic_init(struct bnx2x *bp);
432void bnx2x_iov_init_dq(struct bnx2x *bp);
433void bnx2x_iov_init_dmae(struct bnx2x *bp);
Ariel Eliorfd1fc792013-01-01 05:22:33 +0000434void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
435 struct bnx2x_queue_sp_obj **q_obj);
436void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work);
437int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
Ariel Elior67c431a2013-01-01 05:22:36 +0000438void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
439void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
Ariel Eliorfd1fc792013-01-01 05:22:33 +0000440void bnx2x_iov_sp_task(struct bnx2x *bp);
441/* global vf mailbox routines */
442void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event);
Ariel Eliorb56e9672013-01-01 05:22:32 +0000443void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
Ariel Elior8ca5e172013-01-01 05:22:34 +0000444/* acquire */
445int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
Ariel Eliorb93288d2013-01-01 05:22:35 +0000446 struct vf_pf_resc_request *resc);
447/* init */
448int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
449 dma_addr_t *sb_map);
Ariel Elior8db573b2013-01-01 05:22:37 +0000450
451/* VFOP generic helpers */
452#define bnx2x_vfop_default(state) do { \
453 BNX2X_ERR("Bad state %d\n", (state)); \
454 vfop->rc = -EINVAL; \
455 goto op_err; \
456 } while (0)
457
458enum {
459 VFOP_DONE,
460 VFOP_CONT,
461 VFOP_VERIFY_PEND,
462};
463
464#define bnx2x_vfop_finalize(vf, rc, next) do { \
465 if ((rc) < 0) \
466 goto op_err; \
467 else if ((rc) > 0) \
468 goto op_pending; \
469 else if ((next) == VFOP_DONE) \
470 goto op_done; \
471 else if ((next) == VFOP_VERIFY_PEND) \
472 BNX2X_ERR("expected pending\n"); \
473 else { \
474 DP(BNX2X_MSG_IOV, "no ramrod. scheduling\n"); \
475 atomic_set(&vf->op_in_progress, 1); \
476 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \
477 return; \
478 } \
479 } while (0)
480
481#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \
482 do { \
483 vfop->state = first_state; \
484 vfop->op_p = &vf->op_params; \
485 vfop->transition = trans_hndlr; \
486 vfop->done = done_hndlr; \
487 } while (0)
488
/* Return the currently executing vfop - the head of the VF's op list.
 * Caller must hold vf->op_mutex and the list must be non-empty; both are
 * only WARNed about, not enforced. @bp is unused.
 */
static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp,
						struct bnx2x_virtf *vf)
{
	WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
	WARN_ON(list_empty(&vf->op_list_head));
	return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link);
}
496
Ariel Elior8db573b2013-01-01 05:22:37 +0000497static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp,
498 struct bnx2x_virtf *vf)
499{
500 struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL);
501
502 WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
503 if (vfop) {
504 INIT_LIST_HEAD(&vfop->link);
505 list_add(&vfop->link, &vf->op_list_head);
506 }
507 return vfop;
508}
509
/* Complete and free a vfop:
 *  1. normalize rc (any non-negative value becomes 0);
 *  2. unlink it and propagate rc - to vf->op_rc if it was the last op,
 *     otherwise to the enclosing (now current) op;
 *  3. invoke the done() callback;
 *  4. only after done(), clear op_wait_blocking to release any blocking
 *     waiter (bnx2x_vfop_wait_blocking) if the list is now empty.
 * The ordering of 2-4 is deliberate; caller must hold vf->op_mutex.
 */
static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vfop *vfop)
{
	/* rc < 0 - error, otherwise set to 0 */
	DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc);
	if (vfop->rc >= 0)
		vfop->rc = 0;
	DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc);

	/* unlink the current op context and propagate error code
	 * must be done before invoking the 'done()' handler
	 */
	WARN(!mutex_is_locked(&vf->op_mutex),
	     "about to access vf op linked list but mutex was not locked!");
	list_del(&vfop->link);

	if (list_empty(&vf->op_list_head)) {
		/* outermost op done - rc goes to the VF itself */
		DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc);
		vf->op_rc = vfop->rc;
		DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
		   vf->op_rc, vfop->rc);
	} else {
		struct bnx2x_vfop *cur_vfop;

		/* nested op done - rc goes to the enclosing op */
		DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc);
		cur_vfop = bnx2x_vfop_cur(bp, vf);
		cur_vfop->rc = vfop->rc;
		DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
		   vf->op_rc, vfop->rc);
	}

	/* invoke done handler */
	if (vfop->done) {
		DP(BNX2X_MSG_IOV, "calling done handler\n");
		vfop->done(bp, vf);
	}

	DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n",
	   vf->op_rc, vfop->rc);

	/* if this is the last nested op reset the wait_blocking flag
	 * to release any blocking wrappers, only after 'done()' is invoked
	 */
	if (list_empty(&vf->op_list_head)) {
		DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc);
		vf->op_wait_blocking = false;
	}

	kfree(vfop);
}
560
561static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp,
562 struct bnx2x_virtf *vf)
563{
564 /* can take a while if any port is running */
565 int cnt = 5000;
566
567 might_sleep();
568 while (cnt--) {
569 if (vf->op_wait_blocking == false) {
570#ifdef BNX2X_STOP_ON_ERROR
571 DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt);
572#endif
573 return 0;
574 }
575 usleep_range(1000, 2000);
576
577 if (bp->panic)
578 return -EIO;
579 }
580
581 /* timeout! */
582#ifdef BNX2X_STOP_ON_ERROR
583 bnx2x_panic();
584#endif
585
586 return -EBUSY;
587}
588
589static inline int bnx2x_vfop_transition(struct bnx2x *bp,
590 struct bnx2x_virtf *vf,
591 vfop_handler_t transition,
592 bool block)
593{
594 if (block)
595 vf->op_wait_blocking = true;
596 transition(bp, vf);
597 if (block)
598 return bnx2x_vfop_wait_blocking(bp, vf);
599 return 0;
600}
601
602/* VFOP queue construction helpers */
603void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
604 struct bnx2x_queue_init_params *init_params,
605 struct bnx2x_queue_setup_params *setup_params,
606 u16 q_idx, u16 sb_idx);
607
608void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
609 struct bnx2x_queue_init_params *init_params,
610 struct bnx2x_queue_setup_params *setup_params,
611 u16 q_idx, u16 sb_idx);
612
613void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
614 struct bnx2x_virtf *vf,
615 struct bnx2x_vf_queue *q,
616 struct bnx2x_vfop_qctor_params *p,
617 unsigned long q_type);
618int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
619 struct bnx2x_virtf *vf,
620 struct bnx2x_vfop_cmd *cmd,
621 int qid);
622
Ariel Elior290ca2b2013-01-01 05:22:31 +0000623int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
Ariel Elior8ca5e172013-01-01 05:22:34 +0000624u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
Ariel Eliorb56e9672013-01-01 05:22:32 +0000625/* VF FLR helpers */
626int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid);
627void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid);
Ariel Eliorbe1f1ffa2013-01-01 05:22:24 +0000628void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
629 u16 length);
630void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
631 u16 type, u16 length);
632void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list);
Ariel Eliorfd1fc792013-01-01 05:22:33 +0000633
634bool bnx2x_tlv_supported(u16 tlvtype);
635
Ariel Elior1ab44342013-01-01 05:22:23 +0000636#endif /* bnx2x_sriov.h */