/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#define __PREVENT_DUMP_MEM_ARR__
#define __PREVENT_PXP_GLOBAL_WIN__
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_fcoe.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include <linux/qed/qed_fcoe_if.h>

struct qed_fcoe_conn {
	struct list_head list_entry;
	bool free_on_delete;

	u16 conn_id;
	u32 icid;
	u32 fw_cid;
	u8 layer_code;

	dma_addr_t sq_pbl_addr;
	dma_addr_t sq_curr_page_addr;
	dma_addr_t sq_next_page_addr;
	dma_addr_t xferq_pbl_addr;
	void *xferq_pbl_addr_virt_addr;
	dma_addr_t xferq_addr[4];
	void *xferq_addr_virt_addr[4];
	dma_addr_t confq_pbl_addr;
	void *confq_pbl_addr_virt_addr;
	dma_addr_t confq_addr[2];
	void *confq_addr_virt_addr[2];

	dma_addr_t terminate_params;

	u16 dst_mac_addr_lo;
	u16 dst_mac_addr_mid;
	u16 dst_mac_addr_hi;
	u16 src_mac_addr_lo;
	u16 src_mac_addr_mid;
	u16 src_mac_addr_hi;

	u16 tx_max_fc_pay_len;
	u16 e_d_tov_timer_val;
	u16 rec_tov_timer_val;
	u16 rx_max_fc_pay_len;
	u16 vlan_tag;
	u16 physical_q0;

	struct fc_addr_nw s_id;
	u8 max_conc_seqs_c3;
	struct fc_addr_nw d_id;
	u8 flags;
	u8 def_q_idx;
};

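/*
 * Post the FCoE "init function" ramrod on the slow-path queue (SPQ).
 * The ramrod data is filled from pf_params.fcoe_pf_params (MTU, SQ/CQ/CMDQ
 * sizing, BDQ thresholds), and a dummy CID is acquired whose TSTORM context
 * gets the dummy-timer flag set.
 */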
static int
qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
		       enum spq_mode comp_mode,
		       struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
	struct fcoe_init_ramrod_params *p_ramrod = NULL;
	struct fcoe_init_func_ramrod_data *p_data;
	struct fcoe_conn_context *p_cxt = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_cxt_info cxt_info;
	u32 dummy_cid;
	int rc = 0;
	u16 tmp;
	u8 i;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_INIT_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_init;
	p_data = &p_ramrod->init_ramrod_data;
	fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params;

	p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
	tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages);
	p_data->sq_num_pages_in_pbl = tmp;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
	if (rc)
		return rc;

	cxt_info.iid = dummy_cid;
	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
	if (rc) {
		DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
			  dummy_cid);
		return rc;
	}
	p_cxt = cxt_info.p_cxt;
	SET_FIELD(p_cxt->tstorm_ag_context.flags3,
		  TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);

	fcoe_pf_params->dummy_icid = (u16)dummy_cid;

	tmp = cpu_to_le16(fcoe_pf_params->num_tasks);
	p_data->func_params.num_tasks = tmp;
	p_data->func_params.log_page_size = fcoe_pf_params->log_page_size;
	p_data->func_params.debug_mode = fcoe_pf_params->debug_mode;

	DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr,
		       fcoe_pf_params->glbl_q_params_addr);

	tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries);
	p_data->q_params.cq_num_entries = tmp;

	tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
	p_data->q_params.cmdq_num_entries = tmp;

	tmp = fcoe_pf_params->num_cqs;
	p_data->q_params.num_queues = (u8)tmp;

	tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
	p_data->q_params.queue_relative_offset = (u8)tmp;

	for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
		tmp = cpu_to_le16(p_hwfn->sbs_info[i]->igu_sb_id);
		p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
	}

	p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
	p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;

	p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->rq_buffer_size;
	p_data->q_params.rq_buffer_size = cpu_to_le16(tmp);

	if (fcoe_pf_params->is_target) {
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
		if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
			SET_FIELD(p_data->q_params.q_validity,
				  SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
	} else {
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
	}

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	return rc;
}

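/*
 * Post an "offload connection" ramrod for a single connection: copy the
 * SQ/XFERQ/CONFQ ring addresses, MAC and FC addresses, timers and flags
 * from the qed_fcoe_conn into the ramrod data.
 */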
static int
qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
			 struct qed_fcoe_conn *p_conn,
			 enum spq_mode comp_mode,
			 struct qed_spq_comp_cb *p_comp_addr)
{
	struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL;
	struct fcoe_conn_offload_ramrod_data *p_data;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 physical_q0, tmp;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_conn_ofld;
	p_data = &p_ramrod->offload_ramrod_data;

	/* Transmission PQ is the first of the PF */
	physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_conn->physical_q0 = cpu_to_le16(physical_q0);
	p_data->physical_q0 = cpu_to_le16(physical_q0);

	p_data->conn_id = cpu_to_le16(p_conn->conn_id);
	DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
	DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr);
	DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr);
	DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr);
	DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]);
	DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]);

	DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr);
	DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]);
	DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]);

	p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo);
	p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid);
	p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi);
	p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo);
	p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid);
	p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi);

	tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len);
	p_data->tx_max_fc_pay_len = tmp;
	tmp = cpu_to_le16(p_conn->e_d_tov_timer_val);
	p_data->e_d_tov_timer_val = tmp;
	tmp = cpu_to_le16(p_conn->rec_tov_timer_val);
	p_data->rec_rr_tov_timer_val = tmp;
	tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len);
	p_data->rx_max_fc_pay_len = tmp;

	p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag);
	p_data->s_id.addr_hi = p_conn->s_id.addr_hi;
	p_data->s_id.addr_mid = p_conn->s_id.addr_mid;
	p_data->s_id.addr_lo = p_conn->s_id.addr_lo;
	p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3;
	p_data->d_id.addr_hi = p_conn->d_id.addr_hi;
	p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
	p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
	p_data->flags = p_conn->flags;
	p_data->def_q_idx = p_conn->def_q_idx;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

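/* Post a "terminate connection" ramrod for an offloaded connection. */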
static int
qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
			 struct qed_fcoe_conn *p_conn,
			 enum spq_mode comp_mode,
			 struct qed_spq_comp_cb *p_comp_addr)
{
	struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_conn_terminate;
	DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr,
		       p_conn->terminate_params);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

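/*
 * Post the "destroy function" ramrod and clear the FCoE task segment in
 * the timers block (TM_REG_PF_ENABLE_TASK) for this PF.
 */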
static int
qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum spq_mode comp_mode,
		      struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u32 active_segs = 0;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK);
	active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG);
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

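/*
 * Allocate a qed_fcoe_conn, reusing an entry from the free list when one
 * is available. Otherwise allocate the structure plus DMA-coherent pages
 * for the XFERQ/CONFQ rings and their PBLs, and fill the PBLs with the
 * per-page DMA addresses.
 */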
static int
qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn,
			     struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	void *p_addr;
	u32 i;

	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	if (!list_empty(&p_hwfn->p_fcoe_info->free_list))
		p_conn =
		    list_first_entry(&p_hwfn->p_fcoe_info->free_list,
				     struct qed_fcoe_conn, list_entry);
	if (p_conn) {
		list_del(&p_conn->list_entry);
		spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
		*p_out_conn = p_conn;
		return 0;
	}
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);

	p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
	if (!p_conn)
		return -ENOMEM;

	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->xferq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_pbl_xferq;
	p_conn->xferq_pbl_addr_virt_addr = p_addr;

	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->xferq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_xferq;
		p_conn->xferq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->xferq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i];
	}

	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->confq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_xferq;
	p_conn->confq_pbl_addr_virt_addr = p_addr;

	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->confq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_confq;
		p_conn->confq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->confq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i];
	}

	p_conn->free_on_delete = true;
	*p_out_conn = p_conn;
	return 0;

nomem_confq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->confq_pbl_addr_virt_addr,
			  p_conn->confq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++)
		if (p_conn->confq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->confq_addr_virt_addr[i],
					  p_conn->confq_addr[i]);
nomem_xferq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->xferq_pbl_addr_virt_addr,
			  p_conn->xferq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++)
		if (p_conn->xferq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->xferq_addr_virt_addr[i],
					  p_conn->xferq_addr[i]);
nomem_pbl_xferq:
	kfree(p_conn);
	return -ENOMEM;
}

static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn,
				     struct qed_fcoe_conn *p_conn)
{
	u32 i;

	if (!p_conn)
		return;

	if (p_conn->confq_pbl_addr_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->confq_pbl_addr_virt_addr,
				  p_conn->confq_pbl_addr);

	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		if (!p_conn->confq_addr_virt_addr[i])
			continue;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->confq_addr_virt_addr[i],
				  p_conn->confq_addr[i]);
	}

	if (p_conn->xferq_pbl_addr_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->xferq_pbl_addr_virt_addr,
				  p_conn->xferq_pbl_addr);

	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		if (!p_conn->xferq_addr_virt_addr[i])
			continue;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->xferq_addr_virt_addr[i],
				  p_conn->xferq_addr[i]);
	}
	kfree(p_conn);
}

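/*
 * Address helpers: the per-connection doorbell in the doorbell BAR, and
 * the MSTORM/TSTORM BDQ external-producer locations in the register BAR.
 */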
static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
{
	return (u8 __iomem *)p_hwfn->doorbells +
	       qed_db_addr(cid, DQ_DEMS_LEGACY);
}

static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
						   u8 bdq_id)
{
	if (RESC_NUM(p_hwfn, QED_BDQ)) {
		return (u8 __iomem *)p_hwfn->regview +
		       GTT_BAR0_MAP_REG_MSDM_RAM +
		       MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
								  QED_BDQ),
						       bdq_id);
	} else {
		DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
		return NULL;
	}
}

static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
						     u8 bdq_id)
{
	if (RESC_NUM(p_hwfn, QED_BDQ)) {
		return (u8 __iomem *)p_hwfn->regview +
		       GTT_BAR0_MAP_REG_TSDM_RAM +
		       TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
								  QED_BDQ),
						       bdq_id);
	} else {
		DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
		return NULL;
	}
}

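/* Allocate the per-hwfn FCoE info structure and its free-connection list. */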
int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_fcoe_info *p_fcoe_info;

	/* Allocate LL2's set struct */
	p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
	if (!p_fcoe_info) {
		DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&p_fcoe_info->free_list);

	p_hwfn->p_fcoe_info = p_fcoe_info;
	return 0;
}

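/*
 * Initialize the FCoE task contexts for this PF: mark both timer logical
 * clients valid and set the connection-type flag in each task's TSTORM
 * aggregative context.
 */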
void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
	struct fcoe_task_context *p_task_ctx = NULL;
	int rc;
	u32 i;

	spin_lock_init(&p_hwfn->p_fcoe_info->lock);
	for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
		rc = qed_cxt_get_task_ctx(p_hwfn, i,
					  QED_CTX_WORKING_MEM,
					  (void **)&p_task_ctx);
		if (rc)
			continue;

		memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
		SET_FIELD(p_task_ctx->timer_context.logical_client_0,
			  TIMERS_CONTEXT_VALIDLC0, 1);
		SET_FIELD(p_task_ctx->timer_context.logical_client_1,
			  TIMERS_CONTEXT_VALIDLC1, 1);
		SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
			  TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
	}
}

void qed_fcoe_free(struct qed_hwfn *p_hwfn)
{
	struct qed_fcoe_conn *p_conn = NULL;

	if (!p_hwfn->p_fcoe_info)
		return;

	while (!list_empty(&p_hwfn->p_fcoe_info->free_list)) {
		p_conn = list_first_entry(&p_hwfn->p_fcoe_info->free_list,
					  struct qed_fcoe_conn, list_entry);
		if (!p_conn)
			break;
		list_del(&p_conn->list_entry);
		qed_fcoe_free_connection(p_hwfn, p_conn);
	}

	kfree(p_hwfn->p_fcoe_info);
	p_hwfn->p_fcoe_info = NULL;
}

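/*
 * Acquire a connection: reserve an FCoE CID from the context manager and
 * bind it to either the caller-supplied connection or a newly allocated
 * one. fw_cid combines the opaque FID with the icid.
 */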
static int
qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn,
			    struct qed_fcoe_conn *p_in_conn,
			    struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	int rc = 0;
	u32 icid;

	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid);
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
	if (rc)
		return rc;

	/* Use input connection [if provided] or allocate a new one */
	if (p_in_conn) {
		p_conn = p_in_conn;
	} else {
		rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn);
		if (rc) {
			spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
			qed_cxt_release_cid(p_hwfn, icid);
			spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
			return rc;
		}
	}

	p_conn->icid = icid;
	p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
	*p_out_conn = p_conn;

	return rc;
}

static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn,
					struct qed_fcoe_conn *p_conn)
{
	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list);
	qed_cxt_release_cid(p_hwfn, p_conn->icid);
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
}

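/*
 * Statistics helpers: read the TSTORM (Rx) and PSTORM (Tx) FCoE counters
 * from storm RAM and convert them to host endianness.
 */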
static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_rx_stat tstats;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
	    TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt);
	p_stats->fcoe_rx_data_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt);
	p_stats->fcoe_rx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt);
	p_stats->fcoe_rx_other_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt);

	p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_rq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_crc_error_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt);
	p_stats->fcoe_silent_drop_pkt_task_invalid_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt);
	p_stats->fcoe_silent_drop_total_pkt_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt);
}

static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_tx_stat pstats;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
	    PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt);
	p_stats->fcoe_tx_data_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt);
	p_stats->fcoe_tx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt);
	p_stats->fcoe_tx_other_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt);
}

static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
			      struct qed_fcoe_stats *p_stats)
{
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	p_ptt = qed_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	_qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats);
	_qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

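/*
 * qed_fcoe_ops implementation. Connections handed out to the protocol
 * driver are tracked in cdev->connections, hashed by their icid handle.
 */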
struct qed_hash_fcoe_con {
	struct hlist_node node;
	struct qed_fcoe_conn *con;
};

static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
				  struct qed_dev_fcoe_info *info)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	memset(info, 0, sizeof(*info));
	rc = qed_fill_dev_info(cdev, &info->common);

	info->primary_dbq_rq_addr =
	    qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
	info->secondary_bdq_rq_addr =
	    qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);

	return rc;
}

static void qed_register_fcoe_ops(struct qed_dev *cdev,
				  struct qed_fcoe_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.fcoe = ops;
	cdev->ops_cookie = cookie;
}

static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
						   u32 handle)
{
	struct qed_hash_fcoe_con *hash_con = NULL;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
		return NULL;

	hash_for_each_possible(cdev->connections, hash_con, node, handle) {
		if (hash_con->con->icid == handle)
			break;
	}

	if (!hash_con || (hash_con->con->icid != handle))
		return NULL;

	return hash_con;
}

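/*
 * Stop FCoE on the device: refuse if connections are still outstanding,
 * then post the destroy-function ramrod under a PTT window.
 */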
static int qed_fcoe_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
		DP_NOTICE(cdev, "fcoe already stopped\n");
		return 0;
	}

	if (!hash_empty(cdev->connections)) {
		DP_NOTICE(cdev,
			  "Can't stop fcoe - not all connections were returned\n");
		return -EINVAL;
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt)
		return -EAGAIN;

	/* Stop the fcoe */
	rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), p_ptt,
				   QED_SPQ_MODE_EBLOCK, NULL);
	cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);

	return rc;
}

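/*
 * Start FCoE on the device: post the init-function ramrod and, if the
 * caller passes a qed_fcoe_tid, export the task (TID) memory layout so
 * the protocol driver can fill task contexts directly.
 */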
static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
{
	int rc;

	if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
		DP_NOTICE(cdev, "fcoe already started\n");
		return 0;
	}

	rc = qed_sp_fcoe_func_start(QED_LEADING_HWFN(cdev),
				    QED_SPQ_MODE_EBLOCK, NULL);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start fcoe\n");
		return rc;
	}

	cdev->flags |= QED_FLAG_STORAGE_STARTED;
	hash_init(cdev->connections);

	if (tasks) {
		struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info),
						       GFP_ATOMIC);

		if (!tid_info) {
			DP_NOTICE(cdev,
				  "Failed to allocate tasks information\n");
			qed_fcoe_stop(cdev);
			return -ENOMEM;
		}

		rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev), tid_info);
		if (rc) {
			DP_NOTICE(cdev, "Failed to gather task information\n");
			qed_fcoe_stop(cdev);
			kfree(tid_info);
			return rc;
		}

		/* Fill task information */
		tasks->size = tid_info->tid_size;
		tasks->num_tids_per_block = tid_info->num_tids_per_block;
		memcpy(tasks->blocks, tid_info->blocks,
		       MAX_TID_BLOCKS_FCOE * sizeof(u8 *));

		kfree(tid_info);
	}

	return 0;
}

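/*
 * Acquire a connection on behalf of the protocol driver: wrap it in a
 * hash node keyed by icid and hand back the handle, fw_cid and the
 * connection's doorbell address.
 */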
static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
				 u32 *handle,
				 u32 *fw_cid, void __iomem **p_doorbell)
{
	struct qed_hash_fcoe_con *hash_con;
	int rc;

	/* Allocate a hashed connection */
	hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to allocate hashed connection\n");
		return -ENOMEM;
	}

	/* Acquire the connection */
	rc = qed_fcoe_acquire_connection(QED_LEADING_HWFN(cdev), NULL,
					 &hash_con->con);
	if (rc) {
		DP_NOTICE(cdev, "Failed to acquire Connection\n");
		kfree(hash_con);
		return rc;
	}

	/* Add the connection to the hash table */
	*handle = hash_con->con->icid;
	*fw_cid = hash_con->con->fw_cid;
	hash_add(cdev->connections, &hash_con->node, *handle);

	if (p_doorbell)
		*p_doorbell = qed_fcoe_get_db_addr(QED_LEADING_HWFN(cdev),
						   *handle);

	return 0;
}

static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle)
{
	struct qed_hash_fcoe_con *hash_con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	hlist_del(&hash_con->node);
	qed_fcoe_release_connection(QED_LEADING_HWFN(cdev), hash_con->con);
	kfree(hash_con);

	return 0;
}

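/*
 * Copy the offload parameters supplied by the protocol driver into the
 * tracked connection (rings, timers, VLAN, MACs split into 16-bit words,
 * FC source/destination IDs) and post the offload ramrod.
 */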
static int qed_fcoe_offload_conn(struct qed_dev *cdev,
				 u32 handle,
				 struct qed_fcoe_params_offload *conn_info)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;

	con->sq_pbl_addr = conn_info->sq_pbl_addr;
	con->sq_curr_page_addr = conn_info->sq_curr_page_addr;
	con->sq_next_page_addr = conn_info->sq_next_page_addr;
	con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len;
	con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val;
	con->rec_tov_timer_val = conn_info->rec_tov_timer_val;
	con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len;
	con->vlan_tag = conn_info->vlan_tag;
	con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3;
	con->flags = conn_info->flags;
	con->def_q_idx = conn_info->def_q_idx;

	con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) |
	    conn_info->src_mac[4];
	con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) |
	    conn_info->src_mac[2];
	con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) |
	    conn_info->src_mac[0];
	con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) |
	    conn_info->dst_mac[4];
	con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) |
	    conn_info->dst_mac[2];
	con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) |
	    conn_info->dst_mac[0];

	con->s_id.addr_hi = conn_info->s_id.addr_hi;
	con->s_id.addr_mid = conn_info->s_id.addr_mid;
	con->s_id.addr_lo = conn_info->s_id.addr_lo;
	con->d_id.addr_hi = conn_info->d_id.addr_hi;
	con->d_id.addr_mid = conn_info->d_id.addr_mid;
	con->d_id.addr_lo = conn_info->d_id.addr_lo;

	return qed_sp_fcoe_conn_offload(QED_LEADING_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
				 u32 handle, dma_addr_t terminate_params)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;
	con->terminate_params = terminate_params;

	return qed_sp_fcoe_conn_destroy(QED_LEADING_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
{
	return qed_fcoe_get_stats(QED_LEADING_HWFN(cdev), stats);
}

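/*
 * Aggregate firmware FCoE statistics into the management-firmware format;
 * login-failure counts are requested from the protocol driver through its
 * registered callbacks.
 */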
void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
				 struct qed_mcp_fcoe_stats *stats)
{
	struct qed_fcoe_stats proto_stats;

	/* Retrieve FW statistics */
	memset(&proto_stats, 0, sizeof(proto_stats));
	if (qed_fcoe_stats(cdev, &proto_stats)) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE,
			   "Failed to collect FCoE statistics\n");
		return;
	}

	/* Translate FW statistics into struct */
	stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt +
	    proto_stats.fcoe_rx_xfer_pkt_cnt +
	    proto_stats.fcoe_rx_other_pkt_cnt;
	stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt +
	    proto_stats.fcoe_tx_xfer_pkt_cnt +
	    proto_stats.fcoe_tx_other_pkt_cnt;
	stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt;

	/* Request protocol driver to fill-in the rest */
	if (cdev->protocol_ops.fcoe && cdev->ops_cookie) {
		struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe;
		void *cookie = cdev->ops_cookie;

		if (ops->get_login_failures)
			stats->login_failure = ops->get_login_failures(cookie);
	}
}

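/*
 * Ops table exported to the FCoE protocol driver via qed_get_fcoe_ops().
 * A minimal usage sketch (illustrative only; the consumer's cdev, tasks,
 * callback struct and cookie are placeholders, and error handling is
 * omitted):
 *
 *	const struct qed_fcoe_ops *ops = qed_get_fcoe_ops();
 *	struct qed_dev_fcoe_info info;
 *
 *	ops->fill_dev_info(cdev, &info);
 *	ops->register_ops(cdev, &my_cb_ops, my_cookie);
 *	ops->start(cdev, &tasks);
 *	...
 *	ops->stop(cdev);
 *	qed_put_fcoe_ops();
 */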
static const struct qed_fcoe_ops qed_fcoe_ops_pass = {
	.common = &qed_common_ops_pass,
	.ll2 = &qed_ll2_ops_pass,
	.fill_dev_info = &qed_fill_fcoe_dev_info,
	.start = &qed_fcoe_start,
	.stop = &qed_fcoe_stop,
	.register_ops = &qed_register_fcoe_ops,
	.acquire_conn = &qed_fcoe_acquire_conn,
	.release_conn = &qed_fcoe_release_conn,
	.offload_conn = &qed_fcoe_offload_conn,
	.destroy_conn = &qed_fcoe_destroy_conn,
	.get_stats = &qed_fcoe_stats,
};

const struct qed_fcoe_ops *qed_get_fcoe_ops(void)
{
	return &qed_fcoe_ops_pass;
}
EXPORT_SYMBOL(qed_get_fcoe_ops);

void qed_put_fcoe_ops(void)
{
}
EXPORT_SYMBOL(qed_put_fcoe_ops);